the-stack_0_9690 | import os
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import GradScaler, autocast
from scripts.focalloss import FocalLoss
from Transformers_VQA.dataset_final import make_final_loader
from Transformers_VQA.modified_uniter_attnbias_rcnn_SBERT_graph import Modified_Uniter_attnbias_rcnn_SBERT_graph
def train():
# Constant setup
BATCH_SIZE = 3
BATCH_SIZE_DEV = 1
LR = 5e-6
N_EPOCH = 30
GAMMA = 2
ALPHA = 5
print(f'UNITERonCLIPBERT_attnbias_rcnn_SBERT_graph batch_size={BATCH_SIZE}, Adam_lr={LR}, FocalAlpha={ALPHA}, GAMMA={GAMMA}')
torch.manual_seed(21)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Make loaders
train_loader = make_final_loader('train', BATCH_SIZE, rcnn=True)
dev_loader = make_final_loader('dev', BATCH_SIZE_DEV, rcnn=True)
# Setup Tensorboard
writer = SummaryWriter(comment = f'UNITERonCLIPBERT_attnbias_rcnn_SBERT_graph batch_size={BATCH_SIZE}, Adam_lr={LR}, FocalAlpha={ALPHA}, GAMMA={GAMMA}')
# Per-layer step mask: fill plane i with i + 1, i.e. an embedding of the layer indices 1 to 12
mask_stepper = torch.ones(1, 12, 512, 512).to(device)
for i in range(12):
mask_stepper[0, i, :, :] *= i+1
# Eval for F1
def eval(model):
model.eval()
with torch.no_grad():
total_hit, total_pred_positive, total_truth_positive, total_loss, total_pred = 0, 0, 0, [], 0
for idx, batch in enumerate(dev_loader):
input_ids = batch['input_ids'].to(device)
txt_seg_ids = batch['txt_seg_ids'].to(device)
vis_feats = batch['vis_feats'].to(device)
obj_embs = batch['obj_embs_SBERT'].to(device)
obj_ids = batch['obj_ids'].to(device)
pos_x = batch['pos_x'].to(device)
pos_y = batch['pos_y'].to(device)
pos_z = batch['pos_z'].to(device)
bboxes = batch['bboxes'].to(device)
vis_seg = batch['vis_seg'].to(device)
extended_attention_mask = batch['extended_attention_mask'].to(device)
output_mask = batch['output_mask'].to(device)
reference = batch['reference'].to(device)
scene_seg = batch['scene_segs'].to(device)
rel_mask_left = batch['rel_mask_left'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_right = batch['rel_mask_right'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_up = batch['rel_mask_up'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_down = batch['rel_mask_down'].to(device).unsqueeze(0).unsqueeze(2)
rel_masks = torch.cat((rel_mask_left, rel_mask_right, rel_mask_up, rel_mask_down), axis=0)
rel_masks = rel_masks * mask_stepper
pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask, scene_seg, rel_masks)
pred = pred.reshape(1,-1)
pred = pred[output_mask==1].reshape(-1,1)
truth = reference.float().reshape(-1,1)
loss = criterion(pred, truth).detach()
pred_bin = pred > 0
truth_bin = truth > 0.5
hit = torch.sum(pred_bin*truth_bin == 1).detach()
pred_positive = torch.sum(pred > 0).detach()
truth_positive = torch.sum(truth > 0.5).detach()
total_loss.append(float(loss))
total_hit += int(hit)
total_pred_positive += int(pred_positive)
total_truth_positive += int(truth_positive)
total_pred += int(pred.shape[0])
print('#pred positives',total_pred_positive)
print('#groundtruth positives',total_truth_positive)
print('#total pred', total_pred)
print('#hit', total_hit)
total_loss = sum(total_loss)/len(total_loss)
if (total_pred_positive == 0):
total_pred_positive = 1e10
prec = total_hit / total_pred_positive
recall = total_hit / total_truth_positive
try:
f1 = 2/(1/prec + 1/recall)
except ZeroDivisionError:
f1 = 0
return total_loss, prec, recall, f1
# Training setup
model = Modified_Uniter_attnbias_rcnn_SBERT_graph().to(device)
criterion = FocalLoss(gamma=GAMMA, alpha=ALPHA)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
scaler = GradScaler()
# Train
n_iter = 0
n_prev_iter = 0
running_loss = 0
for epoch in range(N_EPOCH):
for batch_idx, batch in enumerate(train_loader):
model.train()
optimizer.zero_grad()
input_ids = batch['input_ids'].to(device)
txt_seg_ids = batch['txt_seg_ids'].to(device)
vis_feats = batch['vis_feats'].to(device)
obj_embs = batch['obj_embs_SBERT'].to(device)
obj_ids = batch['obj_ids'].to(device)
pos_x = batch['pos_x'].to(device)
pos_y = batch['pos_y'].to(device)
pos_z = batch['pos_z'].to(device)
bboxes = batch['bboxes'].to(device)
vis_seg = batch['vis_seg'].to(device)
extended_attention_mask = batch['extended_attention_mask'].to(device)
output_mask = batch['output_mask'].to(device)
reference = batch['reference'].to(device)
scene_seg = batch['scene_segs'].to(device)
rel_mask_left = batch['rel_mask_left'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_right = batch['rel_mask_right'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_up = batch['rel_mask_up'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_down = batch['rel_mask_down'].to(device).unsqueeze(0).unsqueeze(2)
rel_masks = torch.cat((rel_mask_left, rel_mask_right, rel_mask_up, rel_mask_down), axis=0)
rel_masks = rel_masks * mask_stepper
truth = reference.float().reshape(-1,1)
# To fix: NaN under mixed precision
# with autocast():
# pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask)
# pred = pred.reshape(1,-1)
# pred = pred[output_mask==1].reshape(-1,1)
# loss = criterion(pred, truth)
# scaler.scale(loss).backward()
# scaler.step(optimizer)
# scaler.update()
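# Editorial sketch (not part of the original loop): one common way to tame NaNs under
# torch.cuda.amp is to keep autocast + GradScaler but unscale and clip the gradients
# before stepping; GradScaler.step() then skips the update whenever non-finite
# gradients remain. Assuming the same model/criterion/optimizer/scaler objects as above:
#
#     with autocast():
#         pred = model(input_ids, txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y,
#                      pos_z, bboxes, vis_seg, extended_attention_mask, scene_seg, rel_masks)
#         loss = criterion(pred.reshape(1, -1)[output_mask == 1].reshape(-1, 1), truth)
#     scaler.scale(loss).backward()
#     scaler.unscale_(optimizer)
#     torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
#     scaler.step(optimizer)
#     scaler.update()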
pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask, scene_seg, rel_masks)
pred = pred.reshape(1,-1)
pred = pred[output_mask==1].reshape(-1,1)
loss = criterion(pred, truth)
loss.backward()
optimizer.step()
n_iter += 1
writer.add_scalar('Loss/train_batch', loss, n_iter)
running_loss += loss.detach()
if batch_idx % 2000 == 0:
print(pred.reshape(-1))
print(truth.reshape(-1))
print(running_loss/(n_iter-n_prev_iter))
loss, prec, recall, f1 = eval(model)
writer.add_scalar('Loss/train_avg', running_loss/(n_iter-n_prev_iter), n_iter)
n_prev_iter = n_iter
running_loss = 0
writer.add_scalar('Loss/dev', loss, n_iter)
writer.add_scalar('Precision/dev', prec, n_iter)
writer.add_scalar('Recall/dev', recall, n_iter)
writer.add_scalar('F1/dev', f1, n_iter)
try:
os.makedirs(f'./checkpoint/UNITERonCLIPBERT_attnbiasRcnn_SBERT_graph_n_batchsize{BATCH_SIZE}_lr{LR}_FocalALPHA{ALPHA}_GAMMA{GAMMA}')
except FileExistsError:
pass
torch.save({
'epoch': epoch,
'step': n_iter,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'dev_loss': loss,
}, f'./checkpoint/UNITERonCLIPBERT_attnbiasRcnn_SBERT_graph_n_batchsize{BATCH_SIZE}_lr{LR}_FocalALPHA{ALPHA}_GAMMA{GAMMA}/{epoch}_{batch_idx}_{loss}_{f1}.bin')
print('DONE !!!')
if __name__ == '__main__':
train()
the-stack_0_9693 | import csv
import flask
import operator
import sqlite3
app = flask.Flask(__name__)
@app.route('/api/ships/')
def ships():
cursor = get_db()
result = []
for ship in cursor.execute('select * from Ships'):
result.append({'name': str(ship[0]), 'imo': str(ship[1])})
return flask.jsonify(result)
@app.route('/api/positions/<imo>/')
def positions(imo):
cursor = get_db()
result = []
for p in cursor.execute('select * from Positions where imo = ?', (imo,)):
result.append({
'timestamp': p[1],
'latitude': p[2],
'longitude': p[3]
})
if not result:
flask.abort(404)
return flask.jsonify(sorted(result, key=operator.itemgetter('timestamp'),
reverse=True))
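# Editorial note (illustrative usage, not part of the original file): with the Flask
# dev server started via main() (default bind 127.0.0.1:5000), the two endpoints can
# be exercised from a shell, e.g.:
#
#     curl http://127.0.0.1:5000/api/ships/
#     curl http://127.0.0.1:5000/api/positions/9632179/
#
# (9632179 is the IMO of "Mathilde Maersk" seeded in fill_db below; the positions
# route returns 404 unless positions.csv contains rows for that IMO.)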
def _build_db():
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('''create table Ships (name text, imo integer)''')
c.execute('''create table Positions (
imo integer,
timestamp text,
latitude real,
longitude real)''')
conn.commit()
return conn
def fill_db(positions_file):
conn = _build_db()
cursor = conn.cursor()
ships = [('Mathilde Maersk', 9632179),
('Australian Spirit', 9247455),
('MSC Preziosa', 9595321)]
for ship in ships:
cursor.execute('insert into Ships values (?, ?)', ship)
for row in csv.reader(positions_file):
cursor.execute('insert into Positions values (?, ?, ?, ?)', tuple(row))
conn.commit()
return cursor
def get_db():
if 'db' not in flask.g:
cursor = fill_db(open('positions.csv'))
flask.g.db = cursor
return flask.g.db
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
the-stack_0_9694 | import subprocess
import tempfile
import os
clone_dir = os.path.join(tempfile.gettempdir(), 'scikit-beam-examples')
try:
ret = subprocess.check_output(
['git', 'clone', 'https://github.com/scikit-beam/scikit-beam-examples',
clone_dir])
except subprocess.CalledProcessError:
print("scikit-beam-examples already exists at %s" % (clone_dir))
print("resetting to the master branch")
subprocess.check_call(['git', 'remote', 'update'], cwd=clone_dir)
subprocess.check_call(['git', 'reset', '--hard', 'origin/master'],
cwd=clone_dir)
the-stack_0_9696 | import argparse
import os
import torchvision.transforms as transforms
from src.datamanager import *
from src.datamanager import DataProvider
import src.datamanager.utils as datautils
from PIL import Image
from src.configs import *
from src.ml.net import PyNet
from src.results import performance
from src.results.reid import ReIDPerformance
import torchvision.transforms.functional as F
from src.ml.net.pt import factory as model_factory
from operator import itemgetter
from src.visualization import visualizer
import src.pyrnet.model as reid_model
import src.pyrnet.features as features
import src.pyrnet.metric as metric
# Arg parser
parser = argparse.ArgumentParser(description='ReID Net')
parser.add_argument('--dataset', default='Market-1501', type=str, metavar='STR', help='dataset name (default: Market-1501)')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N', help='number of data loading workers (default: 10)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--print-freq', '--p', default=20, type=int, metavar='N', help='print frequency (default: 20)')
parser.add_argument('--net', default='densenet', type=str, metavar='STR', help='network model (default: densenet)')
parser.add_argument('--depth', default=201, type=int, metavar='N', help='network model depth (default: 201)')
parser.add_argument('--bottleneck-size', default=512, type=int, metavar='N', help='classifier bottleneck size (default: 512)')
parser.add_argument('--pyr-feature-size', default=256, type=int, metavar='N', help='pyramidal maps (default: 256)')
parser.add_argument('--pyr-feature-size-dynamic', default=True, type=bool, metavar='B', help='pyramidal feature size dependent on detail level (default: True)')
parser.add_argument('--pyr-operator', default='max_pool', type=str, metavar='STR', help='pyramidal operator (default: max_pool)')
parser.add_argument('--pyr-levels', default=-1, type=int, metavar='N', help='pyramidal levels (default: -1 => dynamic)')
parser.add_argument('--metric', default='euclidean', type=str, metavar='STR', help='metric (default: euclidean)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='filename of latest checkpoint (default: empty => latest experiment)')
parser.add_argument('--epoch', default=100, type=int, metavar='N', help='evaluation epoch, used only if --checkpoint is not set (default: 100)')
parser.add_argument('--rerank', default=False, type=bool, metavar='B', help='enable re-ranking (default: False)')
def get_args():
return parser.parse_args()
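# Editorial note (illustrative invocation; the script name is an assumption, not taken
# from the source):
#
#     python evaluate_reid.py --dataset Market-1501 --net densenet --depth 201 \
#         --epoch 100 --metric euclidean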
""" ================================================================================================================
EVALUATION
============================================================================================================ """
def evaluate(args, net=None, dset_train=None, dset_test=None,
display_ranking_image_index=(0, 2, 10, 40, 60, 100, 120, 140, 160, 180, 200),
layer_embeddings=('emb\\bottleneck1', 'emb\\bottleneck2', 'emb\\bottleneck3', 'emb\\bottleneck4'),
sample_size=(384, 192)):
# Just check the parsed arguments
print(vars(args))
""" ----------------------------------------------------------------------------------------------------------------
DATA
------------------------------------------------------------------------------------------------------------ """
# Imagenet Normalization
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# Data transformations
transformations = DataTransformer([
transforms.Resize(sample_size, interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize
])
transformations_flipped = DataTransformer([
transforms.Resize(sample_size, interpolation=Image.BICUBIC),
transforms.Lambda(lambda x: F.hflip(x)),
transforms.ToTensor(),
normalize])
# Dataset
if dset_train is None or dset_test is None:
dset_opts = DatasetConfig(args.dataset, None, (0.5, 0.5), cam_pair=(-1, -1))
dset = DatasetReID(dset_opts.name, os.path.join('data', dset_opts.name),
im_size=dset_opts.imsize, in_memory=False, keep_aspect_ratio=True)
# Splits
dset_train, dset_test = dset.split(dset_opts.split, save_load=True, make_each_split_contiguous=True)
# Data provider
data_provider = DataProvider(dset_test, loader=datautils.load_image, transform=transformations)
num_classes = len(dset_train.classes)
# Data provider flipped
data_provider_flipped = DataProvider(dset_test, loader=datautils.load_image, transform=transformations_flipped)
""" ----------------------------------------------------------------------------------------------------------------
MODEL
------------------------------------------------------------------------------------------------------------ """
if net is None:
# From which checkpoint do we need to load the model?
checkpoint = args.checkpoint
if checkpoint == '':
folder = os.path.join('data', 'experiments', args.dataset, os.listdir(os.path.join('data', 'experiments', args.dataset))[-1])
checkpoint = os.path.join(folder, 'checkpoint_epoch-{}.pth.tar'.format(args.epoch))
folder = os.path.dirname(checkpoint)
# Get model (load it from checkpoint!)
model = reid_model.get_model(args.net, args.depth,
data_provider[0][0].size(), num_classes,
bottleneck_size=args.bottleneck_size,
pyr_feature_size=args.pyr_feature_size,
pyr_operator=args.pyr_operator, pyr_feature_size_dynamic=args.pyr_feature_size_dynamic,
checkpoint_path=checkpoint)
# Make it parallel..
model = model_factory.make_it_parallel(model, 'multigpu')
# Net initialization
net = PyNet()
net.model = model
net.exp_folder = folder
# Move to GPU (if available)
net.to_gpu()
""" ----------------------------------------------------------------------------------------------------------------
FEATURES
------------------------------------------------------------------------------------------------------------ """
X_norm = []
data_providers = [data_provider, data_provider_flipped]
# Get features from the data providers
for ii, dp in enumerate(data_providers):
X_norm_new = features.get_features(net, [dp], layer_embeddings=layer_embeddings, batch_size=args.batch_size, workers=args.workers)
# Concat
X_norm.extend(X_norm_new)
""" ----------------------------------------------------------------------------------------------------------------
MATCH
------------------------------------------------------------------------------------------------------------ """
# Match images (re-rank if needed)
D, D_rerank, probe_info, gallery_info = metric.get_distance(dset_test, X_norm, args.metric, re_rank=args.rerank)
# Unpack matching info
probe_idx, probe_id, probe_cam = probe_info
gallery_idx, gallery_id, gallery_cam = gallery_info
""" ----------------------------------------------------------------------------------------------------------------
PERFORMANCE
------------------------------------------------------------------------------------------------------------ """
# CMC
reid_perf = ReIDPerformance()
reid_perf.compute(-D, probe_idx, gallery_idx,probe_id, gallery_id, probe_cam=probe_cam, gallery_cam=gallery_cam)
data_to_print = [reid_perf.cmc[0], reid_perf.cmc[4], reid_perf.cmc[9], reid_perf.cmc[19], reid_perf.cmc[49], reid_perf.nauc, reid_perf.ap.mean()*100]
res_string = 'CMC [1-5-10-20-50]: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -- nAUC: {:.2f} -- mAP: {:.2f}'.format(*data_to_print)
print(res_string)
# CMC plot
visualizer.plot_cmc(reid_perf.cmc, legend='Rank-1: {:.2f} - mAP: {:.2f}'.format(reid_perf.cmc[0], reid_perf.ap.mean()*100), title=str(layer_embeddings), render_on_screen=True)
reid_perf_rerank = ReIDPerformance()
if D_rerank is not None:
# CMC with rerank
reid_perf_rerank.compute(-D_rerank, probe_idx, gallery_idx,probe_id, gallery_id, probe_cam=probe_cam, gallery_cam=gallery_cam)
data_to_print = [reid_perf_rerank.cmc[0], reid_perf_rerank.cmc[4], reid_perf_rerank.cmc[9], reid_perf_rerank.cmc[19], reid_perf_rerank.cmc[49], reid_perf_rerank.nauc, reid_perf_rerank.ap.mean()*100]
res_string = 'Re-Rank => CMC [1-5-10-20-50]: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -- nAUC: {:.2f} -- mAP: {:.2f}'.format(*data_to_print)
print(res_string)
img = visualizer.plot_cmc(reid_perf_rerank.cmc, legend='Rank-1: {:.2f} - mAP: {:.2f}'.format(reid_perf_rerank.cmc[0], reid_perf_rerank.ap.mean()*100), title=str(layer_embeddings), render_on_screen=True)
# Matching images
dp = DataProvider(dset_test, loader=datautils.load_image)
matching_images = performance.get_matching_images(dp, dp, reid_perf.matching_indexes, N=15, selected_indexes=display_ranking_image_index)
matching_ids = itemgetter(*display_ranking_image_index)(reid_perf.matching_ids)
visualizer.display_ranked_matching_images(matching_images, matching_ids=matching_ids, im_size=(256, 256), render_on_screen=True, true_match_line_width=10)
return reid_perf, reid_perf_rerank
if __name__ == '__main__':
args = get_args()
evaluate(args)
the-stack_0_9697 | from abc import ABCMeta, abstractmethod
from collections.abc import Iterable
from numbers import Integral
from typing import Callable
import operator
import warnings
from functools import reduce
import numpy as np
import scipy.sparse as ss
from ._umath import elemwise
from ._utils import _zero_of_dtype, html_table, equivalent, normalize_axis
_reduce_super_ufunc = {np.add: np.multiply, np.multiply: np.power}
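# Editorial note (illustrative check, not upstream code): this table is what lets
# reduce() below fold the implicitly stored fill values back in, since repeating a
# value k times under one ufunc collapses to a single call of its "super" ufunc:
#
#     import numpy as np
#     f, k = 3, 4
#     assert np.add.reduce([f] * k) == np.multiply(f, k)        # 3+3+3+3 == 3*4
#     assert np.multiply.reduce([f] * k) == np.power(f, k)      # 3*3*3*3 == 3**4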
class SparseArray:
"""
An abstract base class for all the sparse array classes.
Attributes
----------
dtype : numpy.dtype
The data type of this array.
fill_value : scalar
The fill value of this array.
"""
__metaclass__ = ABCMeta
def __init__(self, shape, fill_value=None):
if not isinstance(shape, Iterable):
shape = (shape,)
if not all(isinstance(l, Integral) and int(l) >= 0 for l in shape):
raise ValueError(
"shape must be an non-negative integer or a tuple "
"of non-negative integers."
)
self.shape = tuple(int(l) for l in shape)
if fill_value is not None:
if not hasattr(fill_value, "dtype") or fill_value.dtype != self.dtype:
self.fill_value = self.dtype.type(fill_value)
else:
self.fill_value = fill_value
else:
self.fill_value = _zero_of_dtype(self.dtype)
dtype = None
@property
@abstractmethod
def nnz(self):
"""
The number of nonzero elements in this array. Note that any duplicates in
:code:`coords` are counted multiple times. To avoid this, call :obj:`COO.sum_duplicates`.
Returns
-------
int
The number of nonzero elements in this array.
See Also
--------
DOK.nnz : Equivalent :obj:`DOK` array property.
numpy.count_nonzero : A similar Numpy function.
scipy.sparse.coo_matrix.nnz : The Scipy equivalent property.
Examples
--------
>>> import numpy as np
>>> from sparse import COO
>>> x = np.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 0])
>>> np.count_nonzero(x)
6
>>> s = COO.from_numpy(x)
>>> s.nnz
6
>>> np.count_nonzero(x) == s.nnz
True
"""
@property
def ndim(self):
"""
The number of dimensions of this array.
Returns
-------
int
The number of dimensions of this array.
See Also
--------
DOK.ndim : Equivalent property for :obj:`DOK` arrays.
numpy.ndarray.ndim : Numpy equivalent property.
Examples
--------
>>> from sparse import COO
>>> import numpy as np
>>> x = np.random.rand(1, 2, 3, 1, 2)
>>> s = COO.from_numpy(x)
>>> s.ndim
5
>>> s.ndim == x.ndim
True
"""
return len(self.shape)
@property
def size(self):
"""
The number of all elements (including zeros) in this array.
Returns
-------
int
The number of elements.
See Also
--------
numpy.ndarray.size : Numpy equivalent property.
Examples
--------
>>> from sparse import COO
>>> import numpy as np
>>> x = np.zeros((10, 10))
>>> s = COO.from_numpy(x)
>>> s.size
100
"""
# We use this instead of np.prod because np.prod
# returns a float64 for an empty shape.
return reduce(operator.mul, self.shape, 1)
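# Editorial note (illustrative): np.prod(()) evaluates to 1.0 (a float64), whereas
# reduce(operator.mul, (), 1) evaluates to the Python int 1, so size stays integral
# even for zero-dimensional arrays.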
@property
def density(self):
"""
The ratio of nonzero to all elements in this array.
Returns
-------
float
The ratio of nonzero to all elements.
See Also
--------
COO.size : Number of elements.
COO.nnz : Number of nonzero elements.
Examples
--------
>>> import numpy as np
>>> from sparse import COO
>>> x = np.zeros((8, 8))
>>> x[0, :] = 1
>>> s = COO.from_numpy(x)
>>> s.density
0.125
"""
return self.nnz / self.size
def _repr_html_(self):
"""
Diagnostic report about this array.
Renders in Jupyter.
"""
return html_table(self)
@abstractmethod
def asformat(self, format):
"""
Convert this sparse array to a given format.
Parameters
----------
format : str
A format string.
Returns
-------
out : SparseArray
The converted array.
Raises
------
NotImplementedError
If the format isn't supported.
"""
@abstractmethod
def todense(self):
"""
Convert this :obj:`SparseArray` array to a dense :obj:`numpy.ndarray`. Note that
this may take a large amount of memory and time.
Returns
-------
numpy.ndarray
The converted dense array.
See Also
--------
DOK.todense : Equivalent :obj:`DOK` array method.
COO.todense : Equivalent :obj:`COO` array method.
scipy.sparse.coo_matrix.todense : Equivalent Scipy method.
Examples
--------
>>> import sparse
>>> x = np.random.randint(100, size=(7, 3))
>>> s = sparse.COO.from_numpy(x)
>>> x2 = s.todense()
>>> np.array_equal(x, x2)
True
"""
def _make_shallow_copy_of(self, other):
self.__dict__ = other.__dict__.copy()
def __array__(self, *args, **kwargs):
from ._settings import AUTO_DENSIFY
if not AUTO_DENSIFY:
raise RuntimeError(
"Cannot convert a sparse array to dense automatically. "
"To manually densify, use the todense method."
)
return np.asarray(self.todense(), *args, **kwargs)
def __array_function__(self, func, types, args, kwargs):
import sparse as module
sparse_func = None
try:
submodules = getattr(func, "__module__", "numpy").split(".")[1:]
for submodule in submodules:
module = getattr(module, submodule)
sparse_func = getattr(module, func.__name__)
except AttributeError:
pass
else:
return sparse_func(*args, **kwargs)
try:
sparse_func = getattr(type(self), func.__name__)
except AttributeError:
pass
if (
not isinstance(sparse_func, Callable)
and len(args) == 1
and len(kwargs) == 0
):
try:
return getattr(self, func.__name__)
except AttributeError:
pass
if sparse_func is None:
return NotImplemented
return sparse_func(*args, **kwargs)
@staticmethod
def _reduce(method, *args, **kwargs):
assert len(args) == 1
self = args[0]
if isinstance(self, ss.spmatrix):
self = type(self).from_scipy_sparse(self)
return self.reduce(method, **kwargs)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop("out", None)
if out is not None and not all(isinstance(x, type(self)) for x in out):
return NotImplemented
if getattr(ufunc, "signature", None) is not None:
return self.__array_function__(
ufunc, (np.ndarray, type(self)), inputs, kwargs
)
if out is not None:
kwargs["dtype"] = out[0].dtype
if method == "outer":
method = "__call__"
cum_ndim = 0
inputs_transformed = []
for inp in reversed(inputs):
inputs_transformed.append(inp[(Ellipsis,) + (None,) * cum_ndim])
cum_ndim += inp.ndim
inputs = tuple(reversed(inputs_transformed))
if method == "__call__":
result = elemwise(ufunc, *inputs, **kwargs)
elif method == "reduce":
result = SparseArray._reduce(ufunc, *inputs, **kwargs)
else:
return NotImplemented
if out is not None:
(out,) = out
if out.shape != result.shape:
raise ValueError(
"non-broadcastable output operand with shape %s "
"doesn't match the broadcast shape %s" % (out.shape, result.shape)
)
out._make_shallow_copy_of(result)
return out
return result
def reduce(self, method, axis=(0,), keepdims=False, **kwargs):
"""
Performs a reduction operation on this array.
Parameters
----------
method : numpy.ufunc
The method to use for performing the reduction.
axis : Union[int, Iterable[int]], optional
The axes along which to perform the reduction. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
kwargs : dict
Any extra arguments to pass to the reduction operation.
See Also
--------
numpy.ufunc.reduce : A similar Numpy method.
COO.reduce : This method implemented on COO arrays.
GCXS.reduce : This method implemented on GCXS arrays.
"""
axis = normalize_axis(axis, self.ndim)
zero_reduce_result = method.reduce([self.fill_value, self.fill_value], **kwargs)
reduce_super_ufunc = None
if not equivalent(zero_reduce_result, self.fill_value):
reduce_super_ufunc = _reduce_super_ufunc.get(method, None)
if reduce_super_ufunc is None:
raise ValueError(
"Performing this reduction operation would produce "
"a dense result: %s" % str(method)
)
if not isinstance(axis, tuple):
axis = (axis,)
out = self._reduce_calc(method, axis, keepdims, **kwargs)
if len(out) == 1:
return out[0]
data, counts, axis, n_cols, arr_attrs = out
result_fill_value = self.fill_value
if reduce_super_ufunc is None:
missing_counts = counts != n_cols
data[missing_counts] = method(
data[missing_counts], self.fill_value, **kwargs
)
else:
data = method(
data,
reduce_super_ufunc(self.fill_value, n_cols - counts),
).astype(data.dtype)
result_fill_value = reduce_super_ufunc(self.fill_value, n_cols)
out = self._reduce_return(data, arr_attrs, result_fill_value)
if keepdims:
shape = list(self.shape)
for ax in axis:
shape[ax] = 1
out = out.reshape(shape)
if out.ndim == 0:
return out[()]
return out
def _reduce_calc(self, method, axis, keepdims, **kwargs):
raise NotImplementedError
def _reduce_return(self, data, arr_attrs, result_fill_value):
raise NotImplementedError
def sum(self, axis=None, keepdims=False, dtype=None, out=None):
"""
Performs a sum operation along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to sum. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.sum` : Equivalent numpy function.
scipy.sparse.coo_matrix.sum : Equivalent Scipy function.
"""
return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)
def max(self, axis=None, keepdims=False, out=None):
"""
Maximize along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to maximize. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.max` : Equivalent numpy function.
scipy.sparse.coo_matrix.max : Equivalent Scipy function.
"""
return np.maximum.reduce(self, out=out, axis=axis, keepdims=keepdims)
amax = max
def any(self, axis=None, keepdims=False, out=None):
"""
See if any values in the array are ``True``. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to perform the logical OR. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.any` : Equivalent numpy function.
"""
return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)
def all(self, axis=None, keepdims=False, out=None):
"""
See if all values in an array are ``True``. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to perform the logical AND. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.all` : Equivalent numpy function.
"""
return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)
def min(self, axis=None, keepdims=False, out=None):
"""
Minimize along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to minimize. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.min` : Equivalent numpy function.
scipy.sparse.coo_matrix.min : Equivalent Scipy function.
"""
return np.minimum.reduce(self, out=out, axis=axis, keepdims=keepdims)
amin = min
def prod(self, axis=None, keepdims=False, dtype=None, out=None):
"""
Performs a product operation along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to multiply. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.prod` : Equivalent numpy function.
"""
return np.multiply.reduce(
self, out=out, axis=axis, keepdims=keepdims, dtype=dtype
)
def round(self, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
See also
--------
:obj:`numpy.round` : NumPy equivalent ufunc.
:obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two
arguments.
"""
if out is not None and not isinstance(out, tuple):
out = (out,)
return self.__array_ufunc__(
np.round, "__call__", self, decimals=decimals, out=out
)
round_ = round
def clip(self, min=None, max=None, out=None):
"""
Clip (limit) the values in the array.
Return an array whose values are limited to ``[min, max]``. One of min
or max must be given.
See Also
--------
sparse.clip : For full documentation and more details.
numpy.clip : Equivalent NumPy function.
"""
if min is None and max is None:
raise ValueError("One of max or min must be given.")
if out is not None and not isinstance(out, tuple):
out = (out,)
return self.__array_ufunc__(
np.clip, "__call__", self, a_min=min, a_max=max, out=out
)
def astype(self, dtype, casting="unsafe", copy=True):
"""
Copy of the array, cast to a specified type.
See also
--------
scipy.sparse.coo_matrix.astype : SciPy sparse equivalent function
numpy.ndarray.astype : NumPy equivalent ufunc.
:obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two
arguments.
"""
# this matches numpy's behavior
if self.dtype == dtype and not copy:
return self
return self.__array_ufunc__(
np.ndarray.astype, "__call__", self, dtype=dtype, copy=copy, casting=casting
)
def mean(self, axis=None, keepdims=False, dtype=None, out=None):
"""
Compute the mean along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to compute the mean. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
numpy.ndarray.mean : Equivalent numpy method.
scipy.sparse.coo_matrix.mean : Equivalent Scipy method.
Notes
-----
* This function internally calls :obj:`COO.sum_duplicates` to bring the
array into canonical form.
* The :code:`out` parameter is provided just for compatibility with
Numpy and isn't actually supported.
Examples
--------
You can use :obj:`COO.mean` to compute the mean of an array across any
dimension.
>>> from sparse import COO
>>> x = np.array([[1, 2, 0, 0],
... [0, 1, 0, 0]], dtype='i8')
>>> s = COO.from_numpy(x)
>>> s2 = s.mean(axis=1)
>>> s2.todense() # doctest: +SKIP
array([0.75, 0.25])
You can also use the :code:`keepdims` argument to keep the dimensions
after the mean.
>>> s3 = s.mean(axis=0, keepdims=True)
>>> s3.shape
(1, 4)
You can pass in an output datatype, if needed.
>>> s4 = s.mean(axis=0, dtype=np.float16)
>>> s4.dtype
dtype('float16')
By default, this reduces the array down to one number, computing the
mean along all axes.
>>> s.mean()
0.5
"""
if axis is None:
axis = tuple(range(self.ndim))
elif not isinstance(axis, tuple):
axis = (axis,)
den = reduce(operator.mul, (self.shape[i] for i in axis), 1)
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = inter_dtype = np.dtype("f8")
else:
dtype = self.dtype
inter_dtype = (
np.dtype("f4") if issubclass(dtype.type, np.float16) else dtype
)
else:
inter_dtype = dtype
num = self.sum(axis=axis, keepdims=keepdims, dtype=inter_dtype)
if num.ndim:
out = np.true_divide(num, den, casting="unsafe")
return out.astype(dtype) if out.dtype != dtype else out
return np.divide(num, den, dtype=dtype, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the variance along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to compute the variance. Uses all axes by default.
dtype : numpy.dtype, optional
The output datatype.
out: SparseArray, optional
The array to write the output to.
ddof: int
The degrees of freedom.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
numpy.ndarray.var : Equivalent numpy method.
Notes
-----
* This function internally calls :obj:`COO.sum_duplicates` to bring the
array into canonical form.
Examples
--------
You can use :obj:`COO.var` to compute the variance of an array across any
dimension.
>>> from sparse import COO
>>> x = np.array([[1, 2, 0, 0],
... [0, 1, 0, 0]], dtype='i8')
>>> s = COO.from_numpy(x)
>>> s2 = s.var(axis=1)
>>> s2.todense() # doctest: +SKIP
array([0.6875, 0.1875])
You can also use the :code:`keepdims` argument to keep the dimensions
after the variance.
>>> s3 = s.var(axis=0, keepdims=True)
>>> s3.shape
(1, 4)
You can pass in an output datatype, if needed.
>>> s4 = s.var(axis=0, dtype=np.float16)
>>> s4.dtype
dtype('float16')
By default, this reduces the array down to one number, computing the
variance along all axes.
>>> s.var()
0.5
"""
axis = normalize_axis(axis, self.ndim)
if axis is None:
axis = tuple(range(self.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
rcount = reduce(operator.mul, (self.shape[a] for a in axis), 1)
# Make this warning show up on top.
if ddof >= rcount:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
arrmean = self.sum(axis, dtype=dtype, keepdims=True)
np.divide(arrmean, rcount, out=arrmean)
x = self - arrmean
if issubclass(self.dtype.type, np.complexfloating):
x = x.real * x.real + x.imag * x.imag
else:
x = np.multiply(x, x, out=x)
ret = x.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# Compute degrees of freedom and make sure it is not negative.
rcount = max([rcount - ddof, 0])
ret = ret[...]
np.divide(ret, rcount, out=ret, casting="unsafe")
return ret[()]
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the standard deviation along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to compute the standard deviation. Uses
all axes by default.
dtype : numpy.dtype, optional
The output datatype.
out: SparseArray, optional
The array to write the output to.
ddof: int
The degrees of freedom.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
numpy.ndarray.std : Equivalent numpy method.
Notes
-----
* This function internally calls :obj:`COO.sum_duplicates` to bring the
array into canonical form.
Examples
--------
You can use :obj:`COO.std` to compute the standard deviation of an array
across any dimension.
>>> from sparse import COO
>>> x = np.array([[1, 2, 0, 0],
... [0, 1, 0, 0]], dtype='i8')
>>> s = COO.from_numpy(x)
>>> s2 = s.std(axis=1)
>>> s2.todense() # doctest: +SKIP
array([0.8291562, 0.4330127])
You can also use the :code:`keepdims` argument to keep the dimensions
after the standard deviation.
>>> s3 = s.std(axis=0, keepdims=True)
>>> s3.shape
(1, 4)
You can pass in an output datatype, if needed.
>>> s4 = s.std(axis=0, dtype=np.float16)
>>> s4.dtype
dtype('float16')
By default, this reduces the array down to one number, computing the
standard deviation along all axes.
>>> s.std() # doctest: +SKIP
0.7071067811865476
"""
ret = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
ret = np.sqrt(ret)
return ret
@property
def real(self):
"""The real part of the array.
Examples
--------
>>> from sparse import COO
>>> x = COO.from_numpy([1 + 0j, 0 + 1j])
>>> x.real.todense() # doctest: +SKIP
array([1., 0.])
>>> x.real.dtype
dtype('float64')
Returns
-------
out : SparseArray
The real component of the array elements. If the array dtype is
real, the dtype of the array is used for the output. If the array
is complex, the output dtype is float.
See Also
--------
numpy.ndarray.real : NumPy equivalent attribute.
numpy.real : NumPy equivalent function.
"""
return self.__array_ufunc__(np.real, "__call__", self)
@property
def imag(self):
"""The imaginary part of the array.
Examples
--------
>>> from sparse import COO
>>> x = COO.from_numpy([1 + 0j, 0 + 1j])
>>> x.imag.todense() # doctest: +SKIP
array([0., 1.])
>>> x.imag.dtype
dtype('float64')
Returns
-------
out : SparseArray
The imaginary component of the array elements. If the array dtype
is real, the dtype of the array is used for the output. If the
array is complex, the output dtype is float.
See Also
--------
numpy.ndarray.imag : NumPy equivalent attribute.
numpy.imag : NumPy equivalent function.
"""
return self.__array_ufunc__(np.imag, "__call__", self)
def conj(self):
"""Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Examples
--------
>>> from sparse import COO
>>> x = COO.from_numpy([1 + 2j, 2 - 1j])
>>> res = x.conj()
>>> res.todense() # doctest: +SKIP
array([1.-2.j, 2.+1.j])
>>> res.dtype
dtype('complex128')
Returns
-------
out : SparseArray
The complex conjugate, with same dtype as the input.
See Also
--------
numpy.ndarray.conj : NumPy equivalent method.
numpy.conj : NumPy equivalent function.
"""
return np.conj(self)
the-stack_0_9698 | """Webroot plugin."""
import argparse
import collections
import json
import logging
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Set
from acme import challenges
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot._internal import cli
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge as AnnotatedChallenge
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
from certbot.plugins import util
from certbot.util import safe_open
logger = logging.getLogger(__name__)
_WEB_CONFIG_CONTENT = """\
<?xml version="1.0" encoding="UTF-8" ?>
<!--Generated by Certbot-->
<configuration>
<system.webServer>
<staticContent>
<remove fileExtension="."/>
<mimeMap fileExtension="." mimeType="text/plain" />
</staticContent>
</system.webServer>
</configuration>
"""
# This list references the hashes of all versions of the web.config files that Certbot could
# have generated during an HTTP-01 challenge. If you modify _WEB_CONFIG_CONTENT, you MUST add
# the new hash in this list.
_WEB_CONFIG_SHA256SUMS = [
"20c5ca1bd58fa8ad5f07a2f1be8b7cbb707c20fcb607a8fc8db9393952846a97",
"8d31383d3a079d2098a9d0c0921f4ab87e708b9868dc3f314d54094c2fe70336"
]
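# Editorial note (illustrative, not upstream code): if _WEB_CONFIG_CONTENT is ever
# changed, the new checksum to append above can be obtained the same way cleanup()
# verifies it, i.e. by hashing a freshly generated web.config file:
#
#     from certbot import crypto_util
#     print(crypto_util.sha256sum("/path/to/generated/web.config"))  # hypothetical path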
class Authenticator(common.Plugin, interfaces.Authenticator):
"""Webroot Authenticator."""
description = "Place files in webroot directory"
MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
def more_info(self): # pylint: disable=missing-function-docstring
return self.MORE_INFO.format(self.conf("path"))
@classmethod
def add_parser_arguments(cls, add):
add("path", "-w", default=[], action=_WebrootPathAction,
help="public_html / webroot path. This can be specified multiple "
"times to handle different domains; each domain will have "
"the webroot path that preceded it. For instance: `-w "
"/var/www/example -d example.com -d www.example.com -w "
"/var/www/thing -d thing.net -d m.thing.net` (default: Ask)")
add("map", default={}, action=_WebrootMapAction,
help="JSON dictionary mapping domains to webroot paths; this "
"implies -d for each entry. You may need to escape this from "
"your shell. E.g.: --webroot-map "
'\'{"eg1.is,m.eg1.is":"/www/eg1/", "eg2.is":"/www/eg2"}\' '
"This option is merged with, but takes precedence over, -w / "
"-d entries. At present, if you put webroot-map in a config "
"file, it needs to be on a single line, like: webroot-map = "
'{"example.com":"/var/www"}.')
def auth_hint(self, failed_achalls): # pragma: no cover
return ("The Certificate Authority failed to download the temporary challenge files "
"created by Certbot. Ensure that the listed domains serve their content from "
"the provided --webroot-path/-w and that files created there can be downloaded "
"from the internet.")
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=unused-argument,missing-function-docstring
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.full_roots: Dict[str, str] = {}
self.performed: DefaultDict[str, Set[AnnotatedChallenge]] = collections.defaultdict(set)
# stack of dirs successfully created by this authenticator
self._created_dirs: List[str] = []
def prepare(self): # pylint: disable=missing-function-docstring
pass
def perform(self, achalls): # pylint: disable=missing-function-docstring
self._set_webroots(achalls)
self._create_challenge_dirs()
return [self._perform_single(achall) for achall in achalls]
def _set_webroots(self, achalls):
if self.conf("path"):
webroot_path = self.conf("path")[-1]
logger.info("Using the webroot path %s for all unmatched domains.",
webroot_path)
for achall in achalls:
self.conf("map").setdefault(achall.domain, webroot_path)
else:
known_webroots = list(set(self.conf("map").values()))
for achall in achalls:
if achall.domain not in self.conf("map"):
new_webroot = self._prompt_for_webroot(achall.domain,
known_webroots)
# Put the most recently input
# webroot first for easy selection
try:
known_webroots.remove(new_webroot)
except ValueError:
pass
known_webroots.insert(0, new_webroot)
self.conf("map")[achall.domain] = new_webroot
def _prompt_for_webroot(self, domain, known_webroots):
webroot = None
while webroot is None:
if known_webroots:
# Only show the menu if we have options for it
webroot = self._prompt_with_webroot_list(domain, known_webroots)
if webroot is None:
webroot = self._prompt_for_new_webroot(domain)
else:
# Allow prompt to raise PluginError instead of looping forever
webroot = self._prompt_for_new_webroot(domain, True)
return webroot
def _prompt_with_webroot_list(self, domain, known_webroots):
path_flag = "--" + self.option_name("path")
while True:
code, index = display_util.menu(
"Select the webroot for {0}:".format(domain),
["Enter a new webroot"] + known_webroots,
cli_flag=path_flag, force_interactive=True)
if code == display_util.CANCEL:
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return None if index == 0 else known_webroots[index - 1] # code == display_util.OK
def _prompt_for_new_webroot(self, domain, allowraise=False):
code, webroot = ops.validated_directory(
_validate_webroot,
"Input the webroot for {0}:".format(domain),
force_interactive=True)
if code == display_util.CANCEL:
if not allowraise:
return None
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return _validate_webroot(webroot) # code == display_util.OK
def _create_challenge_dirs(self):
path_map = self.conf("map")
if not path_map:
raise errors.PluginError(
"Missing parts of webroot configuration; please set either "
"--webroot-path and --domains, or --webroot-map. Run with "
" --help webroot for examples.")
for name, path in path_map.items():
self.full_roots[name] = os.path.join(path, os.path.normcase(
challenges.HTTP01.URI_ROOT_PATH))
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
# Change the permissions to be writable (GH #1389)
# Umask is used instead of chmod to ensure the client can also
# run as non-root (GH #1795)
old_umask = filesystem.umask(0o022)
try:
# We ignore the last prefix in the next iteration,
# as it does not correspond to a folder path ('/' or 'C:')
for prefix in sorted(util.get_prefixes(self.full_roots[name])[:-1], key=len):
if os.path.isdir(prefix):
# Don't try to create directory if it already exists, as some filesystems
# won't reliably raise EEXIST or EISDIR if directory exists.
continue
try:
# Set owner as parent directory if possible, apply mode for Linux/Windows.
# For Linux, this is coupled with the "umask" call above because
# os.mkdir's "mode" parameter may not always work:
# https://docs.python.org/3/library/os.html#os.mkdir
filesystem.mkdir(prefix, 0o755)
self._created_dirs.append(prefix)
try:
filesystem.copy_ownership_and_apply_mode(
path, prefix, 0o755, copy_user=True, copy_group=True)
except (OSError, AttributeError) as exception:
logger.warning("Unable to change owner and uid of webroot directory")
logger.debug("Error was: %s", exception)
except OSError as exception:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}".format(name, exception))
finally:
filesystem.umask(old_umask)
# On Windows, generate a local web.config file that allows IIS to serve
# challenge files despite the fact they do not have a file extension.
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(self.full_roots[name], "web.config")
if os.path.exists(web_config_path):
logger.info("A web.config file has not been created in "
"%s because another one already exists.", self.full_roots[name])
continue
logger.info("Creating a web.config file in %s to allow IIS "
"to serve challenge files.", self.full_roots[name])
with safe_open(web_config_path, mode="w", chmod=0o644) as web_config:
web_config.write(_WEB_CONFIG_CONTENT)
def _get_validation_path(self, root_path, achall):
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
root_path = self.full_roots[achall.domain]
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = filesystem.umask(0o022)
try:
with safe_open(validation_path, mode="wb", chmod=0o644) as validation_file:
validation_file.write(validation.encode())
finally:
filesystem.umask(old_umask)
self.performed[root_path].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-function-docstring
for achall in achalls:
root_path = self.full_roots.get(achall.domain, None)
if root_path is not None:
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Removing %s", validation_path)
os.remove(validation_path)
self.performed[root_path].remove(achall)
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(root_path, "web.config")
if os.path.exists(web_config_path):
sha256sum = crypto_util.sha256sum(web_config_path)
if sha256sum in _WEB_CONFIG_SHA256SUMS:
logger.info("Cleaning web.config file generated by Certbot in %s.",
root_path)
os.remove(web_config_path)
else:
logger.info("Not cleaning up the web.config file in %s "
"because it is not generated by Certbot.", root_path)
not_removed: List[str] = []
while self._created_dirs:
path = self._created_dirs.pop()
try:
os.rmdir(path)
except OSError as exc:
not_removed.insert(0, path)
logger.info("Challenge directory %s was not empty, didn't remove", path)
logger.debug("Error was: %s", exc)
self._created_dirs = not_removed
logger.debug("All challenges cleaned up")
class _WebrootMapAction(argparse.Action):
"""Action class for parsing webroot_map."""
def __call__(self, parser, namespace, webroot_map, option_string=None):
for domains, webroot_path in json.loads(webroot_map).items():
webroot_path = _validate_webroot(webroot_path)
namespace.webroot_map.update(
(d, webroot_path) for d in cli.add_domains(namespace, domains))
class _WebrootPathAction(argparse.Action):
"""Action class for parsing webroot_path."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._domain_before_webroot = False
def __call__(self, parser, namespace, webroot_path, option_string=None):
if self._domain_before_webroot:
raise errors.PluginError(
"If you specify multiple webroot paths, "
"one of them must precede all domain flags")
if namespace.webroot_path:
# Apply previous webroot to all matched
# domains before setting the new webroot path
prev_webroot = namespace.webroot_path[-1]
for domain in namespace.domains:
namespace.webroot_map.setdefault(domain, prev_webroot)
elif namespace.domains:
self._domain_before_webroot = True
namespace.webroot_path.append(_validate_webroot(webroot_path))
def _validate_webroot(webroot_path):
"""Validates and returns the absolute path of webroot_path.
:param str webroot_path: path to the webroot directory
:returns: absolute path of webroot_path
:rtype: str
"""
if not os.path.isdir(webroot_path):
raise errors.PluginError(webroot_path + " does not exist or is not a directory")
return os.path.abspath(webroot_path)
the-stack_0_9699 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from neodroid.utilities.unity_specifications import Motion, Reaction, ReactionParameters
__author__ = "Christian Heider Nielsen"
import neodroid.wrappers.formal_wrapper as neo
def construct_reactions(env):
parameters = ReactionParameters(
terminable=True,
step=True,
reset=False,
configure=False,
describe=False,
episode_count=True,
)
action1, action2 = env.action_space.sample()
motions = [
Motion("ActorActor", "ActorTransformX_", action1),
Motion("ActorActor", "ActorTransformZ_", action2),
]
reactions = [
Reaction(
environment_name=f"EnvironmentPrototypingEnvironment",
parameters=parameters,
motions=motions,
)
]
for i in range(19):
action1, action2 = env.action_space.sample()
motions = [
Motion("ActorActor", "ActorTransformX_", action1),
Motion("ActorActor", "ActorTransformZ_", action2),
]
reaction = Reaction(
environment_name=f"Environment(Clone){i}PrototypingEnvironment",
parameters=parameters,
motions=motions,
)
reactions.append(reaction)
return reactions
def main():
_environments = neo.NeodroidEnvironment(name="multienv", connect_to_running=True)
while _environments.is_connected:
reactions = construct_reactions(_environments)
states = _environments.react(reactions)
if __name__ == "__main__":
main()
the-stack_0_9700 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
u"""
OMD Livestatus dynamic inventory script
=======================================
If running as an OMD site user, i.e. if ${OMD_ROOT} is set, we try to
connect to the Livestatus socket at the default location
${OMD_ROOT}/tmp/run/live
Alternatively, the path to the Livestatus socket can be set from the
environment via
export OMD_LIVESTATUS_SOCKET=/omd/sites/mysite/tmp/run/live
or on the command-line with --socket.
Inspired by the DigitalOcean inventory script:
https://github.com/ansible/ansible/blob/devel/contrib/inventory/digital_ocean.py
:author: Andreas Härpfer <[email protected]>
:updated by: Samuel López @elchicodepython
"""
from __future__ import print_function
__version__ = '0.2'
import datetime
import os
import sys
import optparse # Legacy ... 2.6 still out there
import socket
import subprocess
try:
import json
except ImportError:
import simplejson as json
try:
maketrans = str.maketrans # Python 3
except AttributeError:
from string import maketrans # Python 2
class OMDLivestatusInventory(object):
#: default socket path
_def_socket_path = u'/tmp/run/live'
#: Livestatus query string
_def_host_query = (u'GET hosts\n'
'Columns: address name alias groups host_custom_variables\n'
'OutputFormat: json\n')
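# Editorial note (manual test sketch, assuming an OMD site shell where `unixcat` is
# on $PATH, the same tool the SSH code path below relies on); the query above can be
# exercised by hand with:
#
#     printf 'GET hosts\nColumns: name address\nOutputFormat: json\n' | \
#         unixcat "$OMD_ROOT/tmp/run/live"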
#: string of bad characters in host or group names
_bad_chars = u'.,;:[]/ '
#: replacement char for bad chars
_replacement_char = u'_'
def __init__(self, location=None, method='socket', by_ip=False):
self.data = {}
self.inventory = {}
self.method = method
#: translation table for sanitizing group names
#
# See the following to find out why this can't be a class variable:
# http://stackoverflow.com/questions/13905741/accessing-class-variables-from-a-list-comprehension-in-the-class-definition
# This version only works for byte strings but not for unicode :-(
#self._trans_table = maketrans(
# self._bad_chars, self._replacement_char * len(_bad_chars))
# Unicode version; see also:
# http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings
self._trans_table = dict((ord(char), self._replacement_char)
for char in self._bad_chars)
if not location:
if 'OMD_LIVESTATUS_SOCKET' in os.environ:
self.location = os.environ['OMD_LIVESTATUS_SOCKET']
elif 'OMD_ROOT' in os.environ:
self.location = (os.environ['OMD_ROOT']
+ OMDLivestatusInventory._def_socket_path)
else:
raise EnvironmentError(
'Unable to determine location of Livestatus socket. '
'Try setting OMD_LIVESTATUS_SOCKET environment variable.'
)
else:
self.location = location
self.load_from_omd()
if by_ip:
self.build_inventory_by_ip()
else:
self.build_inventory_by_name()
def load_from_omd(self):
"""Read host data from livestatus socket.
Populates self.data['hosts'].
"""
self.data['hosts'] = []
if self.method == 'ssh':
answer = json.loads(self._read_from_ssh())
else:
answer = json.loads(self._read_from_socket())
for host in answer:
self.data['hosts'].append(
dict(zip((u'ip', u'name', u'alias', u'groups', u'custom_vars'),
host)))
def _read_from_socket(self):
"""Read data from local Livestatus socket."""
if ':' in self.location:
s = self._get_tcp_socket()
else:
s = self._get_unix_socket()
s.send(self._def_host_query.encode('utf-8'))
s.shutdown(socket.SHUT_WR)
chunks = []
while len(chunks) == 0 or chunks[-1] != b"":
chunks.append(s.recv(4096))
s.close()
reply = "".join(chunks)
return reply
def _get_unix_socket(self):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.location)
return s
def _get_tcp_socket(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address, port = self.location.split(':')
s.connect((address, int(port)))
return s
def _read_from_ssh(self):
"""Read data from remote Livestatus socket via SSH.
Assumes non-interactive (e.g. via ssh-agent) access to the
remote host. The `unixcat` command (part of Livestatus) has to
be available via $PATH at the remote end.
"""
l = self.location.split(':', 1)
l.append('.' + OMDLivestatusInventory._def_socket_path)
host, path = l[0], l[1]
cmd = ['ssh', host,
'-o', 'BatchMode=yes',
'-o', 'ConnectTimeout=10',
'unixcat {0}'.format(path)]
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(
input=OMDLivestatusInventory._def_host_query.encode('utf-8'))
if p.returncode:
raise RuntimeError(err)
return out.decode('utf-8')
def build_inventory_by_ip(self):
"""Create Ansible inventory by IP address instead of by name.
Cave: contrary to hostnames IP addresses are not guaranteed to
be unique in OMD! Since there is only one set of hostvars for a
given IP, duplicate IPs might mean that you are losing data.
When creating static inventory output we issue a warning for
duplicate IPs. For the default JSON output this warning is
suppressed since Ansible discards any output on STDERR.
Group names are sanitized to not contain characters that Ansible
can't digest. In particular group names in Ansible must not
contain blanks!
"""
inventory = {}
hostvars = {}
for host in self.data['hosts']:
for group in host['groups'] or [u'_NOGROUP']:
sanitized_group = group.translate(self._trans_table)
if sanitized_group in inventory:
inventory[sanitized_group].append(host['ip'])
else:
inventory[sanitized_group] = [host['ip']]
            # Detect duplicate IPs in inventory. Keep the first occurrence
# in hostvars instead of overwriting with later data.
ip = host['ip']
if ip not in hostvars:
hostvars[ip] = {
'omd_name': host['name'],
'omd_alias': host['alias'],
'omd_custom_vars': host['custom_vars'],
}
#else:
# # duplicate IP
# pass
self.inventory = inventory
self.inventory['_meta'] = {
'hostvars': hostvars
}
def build_inventory_by_name(self):
"""Create Ansible inventory by OMD name.
Group names are sanitized to not contain characters that Ansible
can't digest. In particular group names in Ansible must not
contain blanks!
"""
inventory = {}
hostvars = {}
for host in self.data['hosts']:
for group in host['groups'] or [u'_NOGROUP']:
sanitized_group = group.translate(self._trans_table)
if sanitized_group in inventory:
inventory[sanitized_group].append(host['name'])
else:
inventory[sanitized_group] = [host['name']]
hostvars[host['name']] = {
'ansible_host': host['ip'],
'omd_alias': host['alias'],
'omd_custom_vars': host['custom_vars'],
}
self.inventory = inventory
self.inventory['_meta'] = {
'hostvars': hostvars
}
def list(self, indent=None, sort_keys=False):
"""Return full inventory data as JSON."""
return json.dumps(self.inventory, indent=indent, sort_keys=sort_keys)
def host(self, name, indent=None, sort_keys=False):
"""Return hostvars for a single host as JSON."""
if name in self.inventory['_meta']['hostvars']:
return(json.dumps(
self.inventory['_meta']['hostvars'][name],
indent=indent,
sort_keys=sort_keys
))
else:
return("{}")
def static(self):
"""Return data in static inventory format."""
out = []
out.append('# File created: {}'.format(datetime.datetime.now()))
for group in [k for k in self.inventory.keys() if k != '_meta']:
out.append('\n[{0}]'.format(group))
for host in self.inventory[group]:
vars = self.inventory['_meta']['hostvars'][host]
hostvars = []
for varname in vars.keys():
hostvars.append('{0}="{1}"'.format(varname, vars[varname]))
out.append('{0}\t{1}'.format(host, ' '.join(hostvars)))
return '\n'.join(out)
def _save_method(option, opt_str, value, parser):
parser.values.method = opt_str.lstrip('-')
parser.values.location = value
def parse_arguments():
"""Parse command line arguments."""
parser = optparse.OptionParser(version='%prog {0}'.format(__version__))
parser.set_defaults(method='socket')
output_group = optparse.OptionGroup(parser, 'Output formats')
output_group.add_option(
'--list', action='store_true', dest='list', default=False,
help='Return full Ansible inventory as JSON (default action).')
output_group.add_option(
'--host', type='string', dest='host', default=None,
help='Return Ansible hostvars for HOST as JSON.')
output_group.add_option(
'--static', action='store_true', dest='static', default=False,
help='Print inventory in static file format to stdout.')
output_group.add_option(
'--by-ip', action='store_true', dest='by_ip', default=False,
help='Create inventory by IP (instead of the default by name).')
parser.add_option_group(output_group)
connect_group = optparse.OptionGroup(parser, 'Connection options')
connect_group.add_option(
'--socket', type='string', dest='location', default=None,
action='callback', callback=_save_method,
help=('Set path to Livestatus socket. If omitted, try to use '
'$OMD_LIVESTATUS_SOCKET or $OMD_ROOT/tmp/run/live.'
))
connect_group.add_option(
'--ssh', type='string', dest='location', default=None,
action='callback', callback=_save_method,
help=('Connect to Livestatus socket via SSH. LOCATION has the '
'form [user@]host[:path], the default path is ./tmp/run/live.'
))
parser.add_option_group(connect_group)
opts, args = parser.parse_args()
# Make `list` the default action.
if not opts.host:
opts.list = True
return opts, args
if __name__ == '__main__':
opts, args = parse_arguments()
inv = OMDLivestatusInventory(opts.location,
method=opts.method,
by_ip=opts.by_ip)
if opts.static:
print(inv.static())
elif opts.list:
print(inv.list(indent=4, sort_keys=True))
elif opts.host:
print(inv.host(opts.host, indent=4, sort_keys=True))
else:
print('Missing command.')
sys.exit(1)
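# --- Illustrative usage sketch (not part of the original script) ---
# The socket path and host name below are assumptions; adjust them to your
# OMD site. Driving the inventory class directly from Python looks like:
def _example_programmatic_use():
    inv = OMDLivestatusInventory('/omd/sites/mysite/tmp/run/live')
    print(inv.list(indent=4, sort_keys=True))   # full inventory as JSON
    print(inv.host('myhost01', indent=4))       # hostvars for a single host
    print(inv.static())                         # static inventory file format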
|
the-stack_0_9702 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Allocates IP address as per DHCP server in the uplink network.
"""
import datetime
import logging
import threading
import time
from ipaddress import IPv4Network, ip_address
from threading import Condition
from typing import MutableMapping, Optional
from magma.mobilityd.dhcp_desc import DHCPDescriptor, DHCPState
from magma.mobilityd.mac import MacAddress, hex_to_mac
from magma.mobilityd.uplink_gw import UplinkGatewayInfo
from scapy.all import AsyncSniffer
from scapy.layers.dhcp import BOOTP, DHCP
from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Dot1Q, Ether
from scapy.sendrecv import sendp
LOG = logging.getLogger('mobilityd.dhcp.sniff')
DHCP_ACTIVE_STATES = [DHCPState.ACK, DHCPState.OFFER]
class DHCPClient:
THREAD_YIELD_TIME = .1
def __init__(
self,
dhcp_store: MutableMapping[str, DHCPDescriptor],
gw_info: UplinkGatewayInfo,
dhcp_wait: Condition,
iface: str = "dhcp0",
lease_renew_wait_min: int = 200,
):
"""
Implement DHCP client to allocate IP for given Mac address.
DHCP client state is maintained in user provided hash table.
Args:
dhcp_store: maintain DHCP transactions, key is mac address.
            gw_info: stores GW IP info from DHCP server
dhcp_wait: notify users on new DHCP packet
iface: DHCP egress and ingress interface.
"""
self._sniffer = AsyncSniffer(
iface=iface,
filter="udp and (port 67 or 68)",
store=False,
prn=self._rx_dhcp_pkt,
)
self.dhcp_client_state = dhcp_store # mac => DHCP_State
self.dhcp_gw_info = gw_info
self._dhcp_notify = dhcp_wait
self._dhcp_interface = iface
self._msg_xid = 0
self._lease_renew_wait_min = lease_renew_wait_min
self._monitor_thread = threading.Thread(
target=self._monitor_dhcp_state,
)
self._monitor_thread.daemon = True
self._monitor_thread_event = threading.Event()
def run(self):
"""
Start DHCP sniffer thread.
        This initializes state required for the DHCP sniffer thread and starts it.
Returns: None
"""
self._sniffer.start()
LOG.info("DHCP sniffer started")
# give it time to schedule the thread and start sniffing.
time.sleep(self.THREAD_YIELD_TIME)
self._monitor_thread.start()
def stop(self):
self._sniffer.stop()
self._monitor_thread_event.set()
def send_dhcp_packet(
self, mac: MacAddress, vlan: int,
state: DHCPState,
dhcp_desc: Optional[DHCPDescriptor] = None,
) -> None:
"""
Send DHCP packet and record state in dhcp_client_state.
Args:
mac: MAC address of interface
state: state of DHCP packet
dhcp_desc: DHCP protocol state.
Returns:
"""
ciaddr = None
# generate DHCP request packet
if state == DHCPState.DISCOVER:
dhcp_opts = [("message-type", "discover")]
dhcp_desc = DHCPDescriptor(
mac=mac, ip="", vlan=vlan,
state_requested=DHCPState.DISCOVER,
)
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
elif state == DHCPState.REQUEST and dhcp_desc:
dhcp_opts = [
("message-type", "request"),
("requested_addr", dhcp_desc.ip),
("server_id", dhcp_desc.server_ip),
]
dhcp_desc.state_requested = DHCPState.REQUEST
pkt_xid = dhcp_desc.xid
ciaddr = dhcp_desc.ip
elif state == DHCPState.RELEASE and dhcp_desc:
dhcp_opts = [
("message-type", "release"),
("server_id", dhcp_desc.server_ip),
]
dhcp_desc.state_requested = DHCPState.RELEASE
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
ciaddr = dhcp_desc.ip
else:
LOG.warning(
"Unknown egress request mac %s state %s",
str(mac),
state,
)
return
dhcp_opts.append("end") # type: ignore[arg-type]
dhcp_desc.xid = pkt_xid
with self._dhcp_notify:
self.dhcp_client_state[mac.as_redis_key(vlan)] = dhcp_desc
pkt = Ether(src=str(mac), dst="ff:ff:ff:ff:ff:ff")
if vlan and vlan != "0":
pkt /= Dot1Q(vlan=int(vlan))
pkt /= IP(src="0.0.0.0", dst="255.255.255.255")
pkt /= UDP(sport=68, dport=67)
pkt /= BOOTP(op=1, chaddr=mac.as_hex(), xid=pkt_xid, ciaddr=ciaddr)
pkt /= DHCP(options=dhcp_opts)
LOG.debug("DHCP pkt xmit %s", pkt.show(dump=True))
sendp(pkt, iface=self._dhcp_interface, verbose=0)
def get_dhcp_desc(
self, mac: MacAddress,
vlan: str,
) -> Optional[DHCPDescriptor]:
"""
Get DHCP description for given MAC.
Args:
mac: Mac address of the client
vlan: vlan id if the IP allocated in a VLAN
Returns: Current DHCP info.
"""
key = mac.as_redis_key(vlan)
if key in self.dhcp_client_state:
return self.dhcp_client_state[key]
LOG.debug("lookup error for %s", str(key))
return None
def release_ip_address(self, mac: MacAddress, vlan: str):
"""
Release DHCP allocated IP.
Args:
mac: MAC address of the IP allocated.
vlan: vlan id if the IP allocated in a VLAN
Returns: None
"""
key = mac.as_redis_key(vlan)
if key not in self.dhcp_client_state:
LOG.error("Unallocated DHCP release for MAC: %s", key)
return
dhcp_desc = self.dhcp_client_state[key]
self.send_dhcp_packet(
mac,
dhcp_desc.vlan,
DHCPState.RELEASE,
dhcp_desc,
)
del self.dhcp_client_state[key]
def _monitor_dhcp_state(self):
"""
monitor DHCP client state.
"""
while True:
wait_time = self._lease_renew_wait_min
with self._dhcp_notify:
for dhcp_record in self.dhcp_client_state.values():
logging.debug("monitor: %s", dhcp_record)
# Only process active records.
if dhcp_record.state not in DHCP_ACTIVE_STATES:
continue
now = datetime.datetime.now()
logging.debug("monitor time: %s", now)
request_state = DHCPState.REQUEST
# in case of lost DHCP lease rediscover it.
if now >= dhcp_record.lease_expiration_time:
request_state = DHCPState.DISCOVER
if now >= dhcp_record.lease_renew_deadline:
logging.debug("sending lease renewal")
self.send_dhcp_packet(
dhcp_record.mac, dhcp_record.vlan,
request_state, dhcp_record,
)
else:
# Find next renewal wait time.
time_to_renew = dhcp_record.lease_renew_deadline - now
wait_time = min(
wait_time, time_to_renew.total_seconds(),
)
# default in wait is 30 sec
wait_time = max(wait_time, self._lease_renew_wait_min)
logging.debug("lease renewal check after: %s sec", wait_time)
self._monitor_thread_event.wait(wait_time)
if self._monitor_thread_event.is_set():
break
@staticmethod
def _get_option(packet, name):
for opt in packet[DHCP].options:
if opt[0] == name:
return opt[1]
return None
def _process_dhcp_pkt(self, packet, state: DHCPState):
LOG.debug("DHCP pkt recv %s", packet.show(dump=True))
mac_addr = MacAddress(hex_to_mac(packet[BOOTP].chaddr.hex()[0:12]))
vlan = ""
if Dot1Q in packet:
vlan = str(packet[Dot1Q].vlan)
mac_addr_key = mac_addr.as_redis_key(vlan)
with self._dhcp_notify:
if mac_addr_key in self.dhcp_client_state:
state_requested = self.dhcp_client_state[mac_addr_key].state_requested
if BOOTP not in packet or packet[BOOTP].yiaddr is None:
LOG.error("no ip offered")
return
ip_offered = packet[BOOTP].yiaddr
subnet_mask = self._get_option(packet, "subnet_mask")
if subnet_mask is not None:
ip_subnet = IPv4Network(
ip_offered + "/" + subnet_mask, strict=False,
)
else:
ip_subnet = IPv4Network(
ip_offered + "/" + "32", strict=False,
)
dhcp_server_ip = None
if IP in packet:
dhcp_server_ip = packet[IP].src
dhcp_router_opt = self._get_option(packet, "router")
if dhcp_router_opt is not None:
router_ip_addr = ip_address(dhcp_router_opt)
else:
                    # use the DHCP server as upstream router when option 3 (router) is missing
router_ip_addr = dhcp_server_ip
self.dhcp_gw_info.update_ip(router_ip_addr, vlan)
lease_expiration_time = self._get_option(packet, "lease_time")
dhcp_state = DHCPDescriptor(
mac=mac_addr,
ip=ip_offered,
state=state,
vlan=vlan,
state_requested=state_requested,
subnet=str(ip_subnet),
server_ip=dhcp_server_ip,
router_ip=router_ip_addr,
lease_expiration_time=lease_expiration_time,
xid=packet[BOOTP].xid,
)
LOG.info(
"Record DHCP for: %s state: %s",
mac_addr_key,
dhcp_state,
)
self.dhcp_client_state[mac_addr_key] = dhcp_state
self._dhcp_notify.notifyAll()
if state == DHCPState.OFFER:
# let other thread work on fulfilling IP allocation
# request.
threading.Event().wait(self.THREAD_YIELD_TIME)
self.send_dhcp_packet(
mac_addr, vlan, DHCPState.REQUEST, dhcp_state,
)
else:
LOG.debug("Unknown MAC: %s ", packet.summary())
return
# ref: https://fossies.org/linux/scapy/scapy/layers/dhcp.py
def _rx_dhcp_pkt(self, packet):
if DHCP not in packet:
return
# Match DHCP offer
if packet[DHCP].options[0][1] == int(DHCPState.OFFER):
self._process_dhcp_pkt(packet, DHCPState.OFFER)
# Match DHCP ack
elif packet[DHCP].options[0][1] == int(DHCPState.ACK):
self._process_dhcp_pkt(packet, DHCPState.ACK)
# TODO handle other DHCP protocol events.
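# --- Illustrative sketch (not part of the original module) ---
# Shows how a DHCP option is read off a scapy packet, mirroring the loop in
# DHCPClient._get_option(); all addresses below are made-up placeholders.
def _example_read_dhcp_option():
    pkt = (
        Ether(src="02:00:00:00:00:01", dst="ff:ff:ff:ff:ff:ff")
        / IP(src="10.0.2.1", dst="255.255.255.255")
        / UDP(sport=67, dport=68)
        / BOOTP(op=2, yiaddr="10.0.2.42")
        / DHCP(options=[
            ("message-type", "offer"),
            ("subnet_mask", "255.255.255.0"),
            ("router", "10.0.2.1"),
            "end",
        ])
    )
    for opt in pkt[DHCP].options:   # same pattern as _get_option()
        if opt[0] == "subnet_mask":
            return opt[1]           # -> "255.255.255.0"
    return None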
|
the-stack_0_9705 | #!/usr/bin/env python
#===============================================================================
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
try:
from packaging.version import Version
except ImportError:
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
import warnings
from sklearn.neighbors._base import NeighborsBase as sklearn_NeighborsBase
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors._kd_tree import KDTree
from sklearn.neighbors._base import _check_weights
from sklearn.neighbors._base import VALID_METRICS
from sklearn.neighbors._classification import KNeighborsClassifier as \
sklearn_KNeighborsClassifier
from sklearn.neighbors._unsupervised import NearestNeighbors as \
sklearn_NearestNeighbors
from sklearn.utils.validation import _deprecate_positional_args, check_is_fitted
from onedal.datatypes import _check_array, _num_features, _num_samples
from onedal.neighbors import KNeighborsClassifier as onedal_KNeighborsClassifier
from .._device_offload import dispatch, wrap_output_data
import numpy as np
from scipy import sparse as sp
if Version(sklearn_version) >= Version("0.24"):
class KNeighborsClassifier_(sklearn_KNeighborsClassifier):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = \
weights if Version(sklearn_version) >= Version("1.0") \
else _check_weights(weights)
elif Version(sklearn_version) >= Version("0.22"):
from sklearn.neighbors._base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(sklearn_KNeighborsClassifier,
BaseSupervisedIntegerMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
else:
from sklearn.neighbors.base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(sklearn_KNeighborsClassifier,
BaseSupervisedIntegerMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
class KNeighborsClassifier(KNeighborsClassifier_):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
def fit(self, X, y):
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=True)
if self.metric_params is not None and 'p' in self.metric_params:
if self.p is not None:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=2)
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.metric_params["p"]
else:
self.effective_metric_params_ = {}
effective_p = self.p
if self.metric in ["minkowski"]:
if effective_p < 1:
raise ValueError("p must be greater or equal to one for minkowski metric")
self.effective_metric_params_["p"] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == "minkowski":
p = self.effective_metric_params_.pop("p", 2)
if p < 1:
raise ValueError(
"p must be greater or equal to one for minkowski metric"
)
if p == 1:
self.effective_metric_ = "manhattan"
elif p == 2:
self.effective_metric_ = "euclidean"
elif p == np.inf:
self.effective_metric_ = "chebyshev"
else:
self.effective_metric_params_["p"] = p
if self.metric == "manhattan":
self.p = 1
if not isinstance(X, (KDTree, BallTree, sklearn_NeighborsBase)):
self._fit_X = _check_array(
X, dtype=[np.float64, np.float32], accept_sparse=True)
self.n_samples_fit_ = _num_samples(self._fit_X)
self.n_features_in_ = _num_features(self._fit_X)
if self.algorithm == "auto":
# A tree approach is better for small number of neighbors or small
# number of features, with KDTree generally faster when available
is_n_neighbors_valid_for_brute = self.n_neighbors is not None and \
self.n_neighbors >= self._fit_X.shape[0] // 2
if self._fit_X.shape[1] > 15 or is_n_neighbors_valid_for_brute:
self._fit_method = "brute"
else:
if self.effective_metric_ in VALID_METRICS["kd_tree"]:
self._fit_method = "kd_tree"
elif callable(self.effective_metric_) or \
self.effective_metric_ in \
VALID_METRICS["ball_tree"]:
self._fit_method = "ball_tree"
else:
self._fit_method = "brute"
else:
self._fit_method = self.algorithm
if hasattr(self, '_onedal_estimator'):
delattr(self, '_onedal_estimator')
# To cover test case when we pass patched
# estimator as an input for other estimator
if isinstance(X, sklearn_NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self.n_samples_fit_ = X.n_samples_fit_
self.n_features_in_ = X.n_features_in_
if hasattr(X, '_onedal_estimator'):
if self._fit_method == "ball_tree":
X._tree = BallTree(
X._fit_X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "kd_tree":
X._tree = KDTree(
X._fit_X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "brute":
X._tree = None
else:
raise ValueError("algorithm = '%s' not recognized" % self.algorithm)
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
self.n_samples_fit_ = X.data.shape[0]
self.n_features_in_ = X.data.shape[1]
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
self.n_samples_fit_ = X.data.shape[0]
self.n_features_in_ = X.data.shape[1]
dispatch(self, 'neighbors.KNeighborsClassifier.fit', {
'onedal': self.__class__._onedal_fit,
'sklearn': sklearn_KNeighborsClassifier.fit,
}, X, y)
return self
@wrap_output_data
def predict(self, X):
check_is_fitted(self)
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=False)
return dispatch(self, 'neighbors.KNeighborsClassifier.predict', {
'onedal': self.__class__._onedal_predict,
'sklearn': sklearn_KNeighborsClassifier.predict,
}, X)
@wrap_output_data
def predict_proba(self, X):
check_is_fitted(self)
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=False)
return dispatch(self, 'neighbors.KNeighborsClassifier.predict_proba', {
'onedal': self.__class__._onedal_predict_proba,
'sklearn': sklearn_KNeighborsClassifier.predict_proba,
}, X)
@wrap_output_data
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
check_is_fitted(self)
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=False)
return dispatch(self, 'neighbors.KNeighborsClassifier.kneighbors', {
'onedal': self.__class__._onedal_kneighbors,
'sklearn': sklearn_KNeighborsClassifier.kneighbors,
}, X, n_neighbors, return_distance)
@wrap_output_data
def radius_neighbors(self, X=None, radius=None, return_distance=True,
sort_results=False):
_onedal_estimator = getattr(self, '_onedal_estimator', None)
if _onedal_estimator is not None or getattr(self, '_tree', 0) is None and \
self._fit_method == 'kd_tree':
if Version(sklearn_version) >= Version("0.24"):
sklearn_NearestNeighbors.fit(self, self._fit_X, getattr(self, '_y', None))
else:
sklearn_NearestNeighbors.fit(self, self._fit_X)
if Version(sklearn_version) >= Version("0.22"):
result = sklearn_NearestNeighbors.radius_neighbors(
self, X, radius, return_distance, sort_results)
else:
result = sklearn_NearestNeighbors.radius_neighbors(
self, X, radius, return_distance)
return result
def _onedal_gpu_supported(self, method_name, *data):
X_incorrect_type = isinstance(data[0], (KDTree, BallTree, sklearn_NeighborsBase))
if X_incorrect_type:
return False
if self._fit_method in ['auto', 'ball_tree']:
condition = self.n_neighbors is not None and \
self.n_neighbors >= self.n_samples_fit_ // 2
if self.n_features_in_ > 15 or condition:
result_method = 'brute'
else:
if self.effective_metric_ in ['euclidean']:
result_method = 'kd_tree'
else:
result_method = 'brute'
else:
result_method = self._fit_method
is_sparse = sp.isspmatrix(data[0])
is_single_output = False
class_count = 1
if len(data) > 1 or hasattr(self, '_onedal_estimator'):
# To check multioutput, might be overhead
if len(data) > 1:
y = np.asarray(data[1])
class_count = len(np.unique(y))
if hasattr(self, '_onedal_estimator'):
y = self._onedal_estimator._y
is_single_output = y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1
is_valid_for_brute = result_method in ['brute'] and \
self.effective_metric_ in ['manhattan',
'minkowski',
'euclidean',
'chebyshev',
'cosine']
is_valid_weights = self.weights in ['uniform', "distance"]
main_condition = is_valid_for_brute and not is_sparse and \
is_single_output and is_valid_weights
if method_name == 'neighbors.KNeighborsClassifier.fit':
return main_condition and class_count >= 2
if method_name in ['neighbors.KNeighborsClassifier.predict',
'neighbors.KNeighborsClassifier.predict_proba',
'neighbors.KNeighborsClassifier.kneighbors']:
return main_condition and hasattr(self, '_onedal_estimator')
raise RuntimeError(f'Unknown method {method_name} in {self.__class__.__name__}')
def _onedal_cpu_supported(self, method_name, *data):
X_incorrect_type = isinstance(data[0], (KDTree, BallTree, sklearn_NeighborsBase))
if X_incorrect_type:
return False
if self._fit_method in ['auto', 'ball_tree']:
condition = self.n_neighbors is not None and \
self.n_neighbors >= self.n_samples_fit_ // 2
if self.n_features_in_ > 15 or condition:
result_method = 'brute'
else:
if self.effective_metric_ in ['euclidean']:
result_method = 'kd_tree'
else:
result_method = 'brute'
else:
result_method = self._fit_method
is_sparse = sp.isspmatrix(data[0])
is_single_output = False
class_count = 1
if len(data) > 1 or hasattr(self, '_onedal_estimator'):
# To check multioutput, might be overhead
if len(data) > 1:
y = np.asarray(data[1])
class_count = len(np.unique(y))
if hasattr(self, '_onedal_estimator'):
y = self._onedal_estimator._y
is_single_output = y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1
is_valid_for_kd_tree = \
result_method in ['kd_tree'] and self.effective_metric_ in ['euclidean']
is_valid_for_brute = result_method in ['brute'] and \
self.effective_metric_ in ['manhattan',
'minkowski',
'euclidean',
'chebyshev',
'cosine']
is_valid_weights = self.weights in ['uniform', "distance"]
main_condition = (is_valid_for_kd_tree or is_valid_for_brute) and \
not is_sparse and is_single_output and is_valid_weights
if method_name == 'neighbors.KNeighborsClassifier.fit':
return main_condition and class_count >= 2
if method_name in ['neighbors.KNeighborsClassifier.predict',
'neighbors.KNeighborsClassifier.predict_proba',
'neighbors.KNeighborsClassifier.kneighbors']:
return main_condition and hasattr(self, '_onedal_estimator')
raise RuntimeError(f'Unknown method {method_name} in {self.__class__.__name__}')
def _onedal_fit(self, X, y, queue=None):
onedal_params = {
'n_neighbors': self.n_neighbors,
'weights': self.weights,
'algorithm': self.algorithm,
'metric': self.effective_metric_,
'p': self.p,
}
try:
requires_y = self._get_tags()["requires_y"]
except KeyError:
requires_y = False
self._onedal_estimator = onedal_KNeighborsClassifier(**onedal_params)
self._onedal_estimator.requires_y = requires_y
self._onedal_estimator.effective_metric_ = self.effective_metric_
self._onedal_estimator.effective_metric_params_ = self.effective_metric_params_
self._onedal_estimator.fit(X, y, queue=queue)
self._save_attributes()
def _onedal_predict(self, X, queue=None):
return self._onedal_estimator.predict(X, queue=queue)
def _onedal_predict_proba(self, X, queue=None):
return self._onedal_estimator.predict_proba(X, queue=queue)
def _onedal_kneighbors(self, X=None, n_neighbors=None,
return_distance=True, queue=None):
return self._onedal_estimator.kneighbors(
X, n_neighbors, return_distance, queue=queue)
def _save_attributes(self):
self.classes_ = self._onedal_estimator.classes_
self.n_features_in_ = self._onedal_estimator.n_features_in_
self.n_samples_fit_ = self._onedal_estimator.n_samples_fit_
self._fit_X = self._onedal_estimator._fit_X
self._y = self._onedal_estimator._y
self._fit_method = self._onedal_estimator._fit_method
self.outputs_2d_ = self._onedal_estimator.outputs_2d_
self._tree = self._onedal_estimator._tree
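# --- Illustrative usage sketch (not part of the original module) ---
# A minimal fit/predict round trip with made-up toy data, only meant to show
# the scikit-learn-compatible surface of this patched classifier.
def _example_knn_usage():
    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    y = np.array([0, 0, 1, 1])
    clf = KNeighborsClassifier(n_neighbors=3, weights='uniform', algorithm='brute')
    clf.fit(X, y)
    labels = clf.predict(X)            # predicted class labels, shape (4,)
    probas = clf.predict_proba(X)      # per-class probabilities, shape (4, 2)
    dist, ind = clf.kneighbors(X, n_neighbors=2)
    return labels, probas, dist, ind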
|
the-stack_0_9708 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image41.xlsx')
# Despite a lot of effort and testing I can't match Excel's
        # calculations exactly for EMF files. The differences are small
        # (<1%) and in general aren't visible. The following ignores the
        # elements where these differences occur until they can be
        # resolved. This issue doesn't occur for any other image type.
self.ignore_elements = {'xl/drawings/drawing1.xml': ['<xdr:rowOff>', '<xdr:colOff>', '<a:ext cx=']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'logo.emf')
workbook.close()
self.assertExcelEqual()
|
the-stack_0_9709 | import sys
sys.path.append("..")
from intcode import IntCodeMachine
from collections import defaultdict
def read_input():
f = open("input_day09.txt")
l = [int(n) for n in f.readline().strip().split(",")]
return defaultdict(int, enumerate(l))
def run():
input_list = read_input()
m = IntCodeMachine(input_list, lambda: 2, lambda x: sys.stdout.write(str(x)))
m.run()
print()
if __name__ == '__main__':
run()
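# --- Illustrative sketch (not part of the original script) ---
# Same wiring as run() above but with input value 1 (part one of the puzzle)
# and the outputs collected in a list instead of written to stdout; assumes
# IntCodeMachine behaves as it is used above.
def run_part1():
    outputs = []
    m = IntCodeMachine(read_input(), lambda: 1, outputs.append)
    m.run()
    return outputs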
|
the-stack_0_9710 | # Copyright (c) 2019 Remi Salmon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# imports
import gpxpy
import numpy as np
from datetime import datetime
from scipy.interpolate import interp1d, splprep, splev
# constants
EARTH_RADIUS = 6371e3 # meters
# functions
def gpx_interpolate(gpx_data, res, deg = 1):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# res = float
# deg = int
# output: gpx_data_interp = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
if not type(deg) is int:
raise TypeError('deg must be int')
if not 1 <= deg <= 5:
raise ValueError('deg must be in [1-5]')
if not len(gpx_data['lat']) > deg:
raise ValueError('number of data points must be > deg')
# interpolate spatial data
_gpx_data = gpx_remove_duplicate(gpx_data)
_gpx_dist = gpx_calculate_distance(_gpx_data, use_ele = True)
x = [_gpx_data[i] for i in ('lat', 'lon', 'ele') if _gpx_data[i]]
tck, _ = splprep(x, u = np.cumsum(_gpx_dist), k = deg, s = 0)
u_interp = np.linspace(0, np.sum(_gpx_dist), num = 1+int(np.sum(_gpx_dist)/res))
x_interp = splev(u_interp, tck)
# interpolate time data linearly to preserve monotonicity
if _gpx_data['tstamp']:
f = interp1d(np.cumsum(_gpx_dist), _gpx_data['tstamp'], fill_value = 'extrapolate')
tstamp_interp = f(u_interp)
gpx_data_interp = {'lat':list(x_interp[0]),
'lon':list(x_interp[1]),
'ele':list(x_interp[2]) if gpx_data['ele'] else None,
'tstamp':list(tstamp_interp) if gpx_data['tstamp'] else None,
'tzinfo':gpx_data['tzinfo']}
return gpx_data_interp
def gpx_calculate_distance(gpx_data, use_ele = True):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# use_ele = bool
# output: gpx_dist = numpy.ndarray[float]
gpx_dist = np.zeros(len(gpx_data['lat']))
for i in range(len(gpx_dist)-1):
lat1 = np.radians(gpx_data['lat'][i])
lon1 = np.radians(gpx_data['lon'][i])
lat2 = np.radians(gpx_data['lat'][i+1])
lon2 = np.radians(gpx_data['lon'][i+1])
delta_lat = lat2-lat1
delta_lon = lon2-lon1
c = 2.0*np.arcsin(np.sqrt(np.sin(delta_lat/2.0)**2+np.cos(lat1)*np.cos(lat2)*np.sin(delta_lon/2.0)**2)) # haversine formula
dist_latlon = EARTH_RADIUS*c # great-circle distance
if gpx_data['ele'] and use_ele:
dist_ele = gpx_data['ele'][i+1]-gpx_data['ele'][i]
gpx_dist[i+1] = np.sqrt(dist_latlon**2+dist_ele**2)
else:
gpx_dist[i+1] = dist_latlon
return gpx_dist
def gpx_calculate_speed(gpx_data):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# output: gpx_speed = numpy.ndarray[float]
gpx_dist = gpx_calculate_distance(gpx_data, use_ele = True)
gpx_dtstamp = np.diff(gpx_data['tstamp'], prepend = gpx_data['tstamp'][0])
gpx_dtstamp[gpx_dtstamp < 1e-6] = np.nan
gpx_speed = np.nan_to_num(gpx_dist/gpx_dtstamp, nan = 0.0)
return gpx_speed
def gpx_remove_duplicate(gpx_data):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# output: gpx_data_nodup = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
gpx_dist = gpx_calculate_distance(gpx_data)
i_dist = np.concatenate(([0], np.nonzero(gpx_dist)[0])) # keep gpx_dist[0] = 0.0
if not len(gpx_dist) == len(i_dist):
print('Removed {} duplicate trackpoint(s)'.format(len(gpx_dist)-len(i_dist)))
gpx_data_nodup = {'lat':[], 'lon':[], 'ele':[], 'tstamp':[], 'tzinfo':gpx_data['tzinfo']}
for k in ('lat', 'lon', 'ele', 'tstamp'):
gpx_data_nodup[k] = [gpx_data[k][i] for i in i_dist] if gpx_data[k] else None
return gpx_data_nodup
def gpx_read(gpx_file):
# input: gpx_file = str
# output: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
gpx_data = {'lat':[], 'lon':[], 'ele':[], 'tstamp':[], 'tzinfo':None}
i = 0
i_latlon = []
i_tstamp = []
with open(gpx_file, 'r') as file:
gpx = gpxpy.parse(file)
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
gpx_data['lat'].append(point.latitude)
gpx_data['lon'].append(point.longitude)
i_latlon.append(i)
try:
gpx_data['ele'].append(point.elevation)
except:
pass
try:
gpx_data['tstamp'].append(point.time.timestamp())
except:
pass
else:
if not gpx_data['tzinfo']:
gpx_data['tzinfo'] = point.time.tzinfo
i_tstamp.append(i)
i += 1
if i_tstamp and not len(i_latlon) == len(i_tstamp):
for k in ('lat', 'lon', 'ele', 'tstamp'):
gpx_data[k] = [gpx_data[k][i] for i in i_tstamp] if gpx_data[k] else None
return gpx_data
def gpx_write(gpx_file, gpx_data, write_speed = False):
# input: gpx_file = str
# gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# write_speed = bool
# output: None
if write_speed:
gpx_speed = gpx_calculate_speed(gpx_data)
gpx = gpxpy.gpx.GPX()
gpx_track = gpxpy.gpx.GPXTrack()
gpx_segment = gpxpy.gpx.GPXTrackSegment()
gpx.tracks.append(gpx_track)
gpx_track.segments.append(gpx_segment)
for i in range(len(gpx_data['lat'])):
lat = gpx_data['lat'][i]
lon = gpx_data['lon'][i]
ele = gpx_data['ele'][i] if gpx_data['ele'] else None
time = datetime.fromtimestamp(gpx_data['tstamp'][i], tz = gpx_data['tzinfo']) if gpx_data['tstamp'] else None
speed = gpx_speed[i] if write_speed else None
gpx_point = gpxpy.gpx.GPXTrackPoint(lat, lon, ele, time, speed = speed)
gpx_segment.points.append(gpx_point)
try:
with open(gpx_file, 'w') as file:
file.write(gpx.to_xml(version = '1.0' if write_speed else '1.1'))
except:
exit('ERROR Failed to save {}'.format(gpx_file))
return
# main
def main():
import argparse
parser = argparse.ArgumentParser(description = 'interpolate GPX file(s) using linear or spline interpolation')
parser.add_argument('gpx_files', metavar = 'FILE', nargs = '+', help = 'GPX file(s)')
parser.add_argument('-d', '--deg', type = int, default = 1, help = 'interpolation degree, 1=linear, 2-5=spline (default: 1)')
parser.add_argument('-r', '--res', type = float, default = 1.0, help = 'interpolation resolution in meters (default: 1)')
parser.add_argument('-s', '--speed', action = 'store_true', help = 'Save interpolated speed')
args = parser.parse_args()
for gpx_file in args.gpx_files:
if not gpx_file.endswith('_interpolated.gpx'):
gpx_data = gpx_read(gpx_file)
print('Read {} trackpoints from {}'.format(len(gpx_data['lat']), gpx_file))
gpx_data_interp = gpx_interpolate(gpx_data, args.res, args.deg)
output_file = '{}_interpolated.gpx'.format(gpx_file[:-4])
gpx_write(output_file, gpx_data_interp, write_speed = args.speed)
print('Saved {} trackpoints to {}'.format(len(gpx_data_interp['lat']), output_file))
if __name__ == '__main__':
main()
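# --- Illustrative usage sketch (not part of the original module) ---
# Builds a tiny synthetic track (all coordinates and timestamps are made up)
# and resamples it to roughly one point per meter with linear interpolation.
def _example_interpolation():
    gpx_data = {'lat': [48.8580, 48.8585, 48.8590],
                'lon': [2.2945, 2.2950, 2.2955],
                'ele': [35.0, 36.0, 37.0],
                'tstamp': [0.0, 30.0, 60.0],
                'tzinfo': None}
    gpx_data_interp = gpx_interpolate(gpx_data, res=1.0, deg=1)
    return len(gpx_data_interp['lat'])   # number of resampled trackpoints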
|
the-stack_0_9714 | import json
from . import eosapi
from . import config
def create_account_on_chain(from_account, new_account, balance, public_key):
assert len(new_account) == 12
assert balance <= 1.0
assert len(public_key) == 53 and public_key[:3] == 'EOS'
memo = '%s-%s'%(new_account, public_key)
return eosapi.transfer(from_account, 'signupeoseos', balance, memo)
def buyrambytes(payer, receiver, _bytes):
args = {"payer":payer,"receiver":receiver,"bytes":_bytes}
return eosapi.push_action(config.system_contract, 'buyrambytes', args, {payer:'active'})
def buyram(payer, receiver, quant):
args = {'payer':payer, 'receiver':receiver, 'quant':'%.4f %s'%(quant, config.main_token)}
return eosapi.push_action(config.system_contract, 'buyram', args, {payer:'active'})
def sellram(account, _bytes):
return eosapi.push_action(config.system_contract, 'sellram', {'account':account, 'bytes':_bytes}, {account:'active'})
def dbw(_from, _to, net, cpu, transfer=False):
args = {'from':_from,
'receiver':_to,
'stake_net_quantity':'%.4f %s'%(net, config.main_token),
'stake_cpu_quantity':'%.4f %s'%(cpu, config.main_token),
'transfer':transfer
}
return eosapi.push_action(config.system_contract, 'delegatebw', args, {_from:'active'})
def undbw(_from, _to, net, cpu, transfer=False):
args = {'from':_from,
'receiver':_to,
'unstake_net_quantity':'%.4f %s'%(net, config.main_token),
'unstake_cpu_quantity':'%.4f %s'%(cpu, config.main_token),
'transfer':transfer
}
return eosapi.push_action(config.system_contract, 'undelegatebw', args, {_from:'active'})
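# --- Illustrative usage sketch (not part of the original module) ---
# Account names and amounts are placeholders and the public key is the
# well-known eosio example key; a configured eosapi environment with an
# unlocked wallet holding the creator's keys is assumed.
def _example_account_setup():
    # fund creation of a new 12-character account via the signupeoseos contract
    create_account_on_chain('fundingacct1', 'newaccount12', 1.0,
                            'EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV')
    # give the new account 8 KiB of RAM and stake 0.5/0.5 for NET/CPU
    buyrambytes('fundingacct1', 'newaccount12', 8192)
    dbw('fundingacct1', 'newaccount12', 0.5, 0.5)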
|
the-stack_0_9717 | import os
# read /tmp/yy.txt and write a crude "encoding" of it (each character's code
# point shifted by +2, concatenated as decimal digits) to /tmp/yy1.txt
f=open("/tmp/yy.txt")
q=f.read()
f.close()
s=''
for i in str(q):
# print(ord(i)+2,end="")
s=s+str(ord(i)+2)
#print()
#print(s)
s1=open("/tmp/yy1.txt",'w')
s1.write(s)
s1.close()
#print(q)
|
the-stack_0_9718 | # Build the graph with networkx to obtain information and make the necessary changes
import networkx as nx
import json
from networkx.algorithms.simple_paths import all_simple_paths
with open("./data/disciplinas.json", 'r') as f:
line = f.readline()
disciplinas = json.loads(line)
G = nx.DiGraph()
G.add_nodes_from(list(disciplinas.keys()))
for key in list(disciplinas.keys()):
for req in disciplinas[key]['requisitos']:
G.add_edge(req, key)
if(len(disciplinas[key]['requisitos']) == 0):
G.add_edge('START', key)
# Get the longest chain of prerequisite courses needed for each course.
# Used to determine the vertex position when building the visualization in the app.
for key in list(disciplinas.keys()):
max_path = [len(p) for p in all_simple_paths(G, 'START', key)]
if (max_path):
disciplinas[key]['maxpath'] = max(max_path)-2
else:
disciplinas[key]['maxpath'] = 0
with open("./public/assets/data/disciplinas.json", 'w+') as f:
json.dump(disciplinas, f) |
the-stack_0_9720 | """Contains methods and classes to collect data from
tushare API
"""
import pandas as pd
import tushare as ts
from tqdm import tqdm
class TushareDownloader :
"""Provides methods for retrieving daily stock data from
tushare API
Attributes
----------
start_date : str
start date of the data (modified from config.py)
end_date : str
end date of the data (modified from config.py)
ticker_list : list
a list of stock tickers (modified from config.py)
Methods
-------
fetch_data()
Fetches data from tushare API
date:date
Open: opening price
High: the highest price
Close: closing price
Low: lowest price
Volume: volume
Price_change: price change
P_change: fluctuation
ma5: 5-day average price
Ma10: 10 average daily price
Ma20:20 average daily price
V_ma5:5 daily average
V_ma10:10 daily average
V_ma20:20 daily average
"""
def __init__(self, start_date: str, end_date: str, ticker_list: list):
self.start_date = start_date
self.end_date = end_date
self.ticker_list = ticker_list
def fetch_data(self) -> pd.DataFrame:
"""Fetches data from Yahoo API
Parameters
----------
Returns
-------
`pd.DataFrame`
7 columns: A date, open, high, low, close, volume and tick symbol
for the specified stock ticker
"""
# Download and save the data in a pandas DataFrame:
data_df = pd.DataFrame()
for tic in tqdm(self.ticker_list, total=len(self.ticker_list)):
temp_df = ts.get_hist_data(tic[0:6],start=self.start_date,end=self.end_date)
temp_df["tic"] = tic[0:6]
data_df = data_df.append(temp_df)
data_df = data_df.reset_index(level="date")
        # drop pre-computed indicator columns, then create day of the week column (monday = 0)
        data_df = data_df.drop(["price_change","p_change","ma5","ma10","ma20","v_ma5","v_ma10","v_ma20"], axis=1)
data_df["day"] = pd.to_datetime(data_df["date"]).dt.dayofweek
#rank desc
data_df = data_df.sort_index(axis=0,ascending=False)
data_df = data_df.reset_index(drop=True)
# convert date to standard string format, easy to filter
data_df["date"] = pd.to_datetime(data_df["date"])
data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
# drop missing data
data_df = data_df.dropna()
print("Shape of DataFrame: ", data_df.shape)
# print("Display DataFrame: ", data_df.head())
print(data_df)
data_df = data_df.sort_values(by=['date','tic']).reset_index(drop=True)
return data_df
def select_equal_rows_stock(self, df):
df_check = df.tic.value_counts()
df_check = pd.DataFrame(df_check).reset_index()
df_check.columns = ["tic", "counts"]
mean_df = df_check.counts.mean()
equal_list = list(df.tic.value_counts() >= mean_df)
names = df.tic.value_counts().index
select_stocks_list = list(names[equal_list])
df = df[df.tic.isin(select_stocks_list)]
return df
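# --- Illustrative usage sketch (not part of the original module) ---
# Ticker symbols and the date range are placeholders; network access and the
# legacy free ts.get_hist_data() interface used above are assumed to work.
def _example_download():
    downloader = TushareDownloader(start_date="2020-01-01",
                                   end_date="2020-06-30",
                                   ticker_list=["600000.SH", "600036.SH"])
    df = downloader.fetch_data()   # date, open, high, low, close, volume, tic, day
    return downloader.select_equal_rows_stock(df)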
|
the-stack_0_9723 | #!/home/knielbo/virtenvs/teki/bin/python
"""
# front page only for us
$ python infomedia_parser.py --dataset ../dat/NEWS-DATA/berglinske-print --pagecontrol True --page 1 --sort True --verbose 10
# all pages for Peter
$ python infomedia_parser.py --dataset ../dat/NEWS-DATA/berglinske-print --pagecontrol False --page 1 --sort True --verbose 10
"""
import os
import argparse
import json
import re
import glob
import newlinejson
def preprocess(dobj):
# filters
stopwords = [r"forsidehenvisning", r" side "]#, r"side", r"SIDE"]
pat0 = re.compile(r"<.*?>")# remove html tags
pat1 = re.compile(r" +")# remove extra spacing to deal with p1 header
text = dobj["BodyText"]
heading = dobj["Heading"]
subheading = dobj["SubHeading"]
text = text + " " + heading + " " + subheading
text = re.sub(pat0, " ", text)
for word in stopwords:
text = re.sub(word, " ", text, flags=re.IGNORECASE)
text = re.sub(pat1, " ", text)
title = dobj["Paragraph"]
date = dobj["PublishDate"]
return text, title, date
flatten = lambda l: [item for sublist in l for item in sublist]
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to folder with input data")
ap.add_argument("-c", "--pagecontrol", required=False, default=False, help="if extraction should be focused on specific page")
ap.add_argument("-p", "--page", required=False, type=int, default=1, help="which page to be focused on, default is front page")
ap.add_argument("-s", "--sort", required=False, type=bool, default=True, help="sort data in date")
ap.add_argument("-v", "--verbose", required=False, type=int, default=-1, help="verbose mode (number of object to print), -1 to deactivate")
ap.add_argument('-fn', '--filename', required=False, type=bool, default=False, help='Print filenames during processing')
args = vars(ap.parse_args())
TEXT, TITLE, DATE = list(), list(), list()
error = list()
filenames = glob.glob(os.path.join(args["dataset"], "*.ndjson"))
for i, filename in enumerate(filenames):
if args['filename']:
print(filename)
with open(filename, "r") as fobj:
lignes = fobj.readlines()
if lignes:
texts = list()
titles = list()
dates = list()
for ligne in lignes:
dobj = json.loads(ligne)
# control for missing PageIds
if dobj["PageIds"][0]:
pageid = int(dobj["PageIds"][0])
else:
pageid = 'NA (PageIds blank in API)'
# extract date from page
if args["pagecontrol"]:
if pageid == args["page"]:
text, title, date = preprocess(dobj)
texts.append(text)
titles.append(title)
dates.append(date)
# get all data
else:
text, title, date = preprocess(dobj)
texts.append(text)
titles.append(title)
dates.append(date)
# concatenate all content on page
if args["pagecontrol"]:
# control for empty pages
if texts and dates and titles:
texts = [" ".join(texts)]
dates = [dates[0]]
titles = [" ".join(titles)]
else:
texts = []
dates = []
titles = []
TEXT.append(texts)
DATE.append(dates)
TITLE.append(titles)
# record empty files
else:
error.append(os.path.basename(filename))
if args["verbose"] > 0 and i > 0 and (i + 1) % args["verbose"] == 0:
print("[INFO] processed {}/{}".format(i + 1, len(filenames)))
print("[INFO] {} of {} are empty in {} ...".format(len(error),len(filenames), os.path.basename(args["dataset"])))
# flatten ls of ls
TEXT = flatten(TEXT)
TITLE = flatten(TITLE)
DATE = flatten(DATE)
# sort data on date
if args["sort"]:
TEXT = [text for _,text in sorted(zip(DATE, TEXT))]
TITLE = [title for _,title in sorted(zip(DATE, TITLE))]
DATE = sorted(DATE)
# write to external
lignes = list()
for i, date in enumerate(DATE):
d = dict()
d["date"] = date
d["text"] = TEXT[i]
d["title"] = TITLE[i]
lignes.append(d)
# folder
if args['pagecontrol']:
outdir = 'FrontPage'
else:
outdir = 'AllPages'
if not os.path.exists(outdir):
os.mkdir(outdir)
fname = os.path.join(outdir,
os.path.basename(
os.path.normpath(args["dataset"])
) + ".ndjson")
print("[INFO] writing target data to: {}".format(fname))
with open(fname, "w") as f:
newlinejson.dump(lignes, f, ensure_ascii=False)
if __name__=="__main__":
main() |
the-stack_0_9726 | # Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
import traceback
from colcon_core.logging import colcon_logger
from colcon_core.plugin_system import instantiate_extensions
from colcon_core.plugin_system import order_extensions_by_priority
logger = colcon_logger.getChild(__name__)
class ArgcompleteCompleterExtensionPoint:
"""
The interface for argcomplete completer extensions.
An argcomplete completer extension provides completion proposals for
command line arguments.
For each instance the attribute `ARGCOMPLETE_COMPLETER_NAME` is being set
to the basename of the entry point registering the extension.
"""
"""The version of the argcomplete completer extension interface."""
EXTENSION_POINT_VERSION = '1.0'
"""The default priority of argcomplete completer extensions."""
PRIORITY = 100
def get_completer(self, parser, *args, **kwargs):
"""
Get a completer for a specific argument.
The argument is identified by the same `args` and `kwargs` which are
passed to the `add_argument()` function of the parser.
This method must be overridden in a subclass.
:param parser: The argument parser on which `add_argument()` was
called on
:param args: The positional arguments to `add_argument()`
:param kwargs: The keyword arguments to `add_argument()`
:returns: An argcomplete completer, or None
"""
raise NotImplementedError()
def get_argcomplete_completer_extensions(*, exclude_names=None):
"""Get the argcomplete completer extensions in priority order."""
extensions = instantiate_extensions(__name__, exclude_names=exclude_names)
for name, extension in extensions.items():
extension.ARGCOMPLETE_COMPLETER_NAME = name
return order_extensions_by_priority(extensions)
def get_argcomplete_completer(parser, *args, **kwargs):
"""Get the completer for given arguments."""
extensions = get_argcomplete_completer_extensions()
# try extensions in priority order
for extension in extensions.values():
# check if extension provides a completer
logger.log(
1,
'get_argcomplete_completer('
'{extension.ARGCOMPLETE_COMPLETER_NAME}) for {args}'
.format_map(locals()))
try:
completer = extension.get_completer(parser, *args, **kwargs)
assert callable(completer) or completer is None, \
'get_completer() should return a callable or None'
except Exception as e: # noqa: F841
# catch exceptions raised in completer extension
exc = traceback.format_exc()
logger.error(
'Exception in argcomplete completer extension '
"'{extension.ARGCOMPLETE_COMPLETER_NAME}': {e}\n{exc}"
.format_map(locals()))
# skip failing extension, continue with next one
continue
# if not continue with next extension
if completer is None:
continue
# return provided completer
logger.log(
5,
'get_argcomplete_completer('
'{extension.ARGCOMPLETE_COMPLETER_NAME}) provided a completer for '
'{args}'.format_map(locals()))
return completer
return None
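# --- Illustrative sketch (not part of the original module) ---
# A hypothetical extension that proposes fixed choices for a '--log-level'
# argument and defers to other extensions otherwise; to be picked up it would
# still have to be registered as an entry point for this extension point.
class _ExampleLogLevelCompleter(ArgcompleteCompleterExtensionPoint):
    def get_completer(self, parser, *args, **kwargs):
        if '--log-level' not in args:
            return None
        try:
            from argcomplete.completers import ChoicesCompleter
        except ImportError:
            return None
        return ChoicesCompleter(('debug', 'info', 'warning', 'error'))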
|
the-stack_0_9727 | import os
import errno
import random
import numpy as np
import torch as th
def set_random_seeds(seed, cuda):
"""Set seeds for python random module numpy.random and torch.
Parameters
----------
seed: int
Random seed.
cuda: bool
Whether to set cuda seed with torch.
"""
random.seed(seed)
th.manual_seed(seed)
if cuda:
th.cuda.manual_seed_all(seed)
np.random.seed(seed)
def np_to_var(
X, requires_grad=False, dtype=None, pin_memory=False, **tensor_kwargs
):
"""
Convenience function to transform numpy array to `torch.Tensor`.
Converts `X` to ndarray using asarray if necessary.
Parameters
----------
X: ndarray or list or number
Input arrays
requires_grad: bool
passed on to Variable constructor
dtype: numpy dtype, optional
    tensor_kwargs:
        passed on to the torch.tensor constructor
Returns
-------
var: `torch.Tensor`
"""
if not hasattr(X, "__len__"):
X = [X]
X = np.asarray(X)
if dtype is not None:
X = X.astype(dtype)
X_tensor = th.tensor(X, requires_grad=requires_grad, **tensor_kwargs)
if pin_memory:
X_tensor = X_tensor.pin_memory()
return X_tensor
def var_to_np(var):
"""Convenience function to transform `torch.Tensor` to numpy
array.
Should work both for CPU and GPU."""
return var.cpu().data.numpy()
def corr(a, b):
"""
Computes correlation only between terms of a and terms of b, not within
a and b.
Parameters
----------
a, b: 2darray, features x samples
Returns
-------
Correlation between features in x and features in y
"""
# Difference to numpy:
# Correlation only between terms of x and y
# not between x and x or y and y
this_cov = cov(a, b)
return _cov_to_corr(this_cov, a, b)
def cov(a, b):
"""
Computes covariance only between terms of a and terms of b, not within
a and b.
Parameters
----------
a, b: 2darray, features x samples
Returns
-------
Covariance between features in x and features in y
"""
demeaned_a = a - np.mean(a, axis=1, keepdims=True)
demeaned_b = b - np.mean(b, axis=1, keepdims=True)
this_cov = np.dot(demeaned_a, demeaned_b.T) / (b.shape[1] - 1)
return this_cov
def _cov_to_corr(this_cov, a, b):
# computing "unbiased" corr
# ddof=1 for unbiased..
var_a = np.var(a, axis=1, ddof=1)
var_b = np.var(b, axis=1, ddof=1)
return _cov_and_var_to_corr(this_cov, var_a, var_b)
def _cov_and_var_to_corr(this_cov, var_a, var_b):
divisor = np.outer(np.sqrt(var_a), np.sqrt(var_b))
return this_cov / divisor
def wrap_reshape_apply_fn(stat_fn, a, b, axis_a, axis_b):
"""
Reshape two nd-arrays into 2d-arrays, apply function and reshape
result back.
Parameters
----------
stat_fn: function
Function to apply to 2d-arrays
a: nd-array: nd-array
b: nd-array
axis_a: int or list of int
sample axis
axis_b: int or list of int
sample axis
Returns
-------
result: nd-array
The result reshaped to remaining_dims_a + remaining_dims_b
"""
if not hasattr(axis_a, "__len__"):
axis_a = [axis_a]
if not hasattr(axis_b, "__len__"):
axis_b = [axis_b]
other_axis_a = [i for i in range(a.ndim) if i not in axis_a]
other_axis_b = [i for i in range(b.ndim) if i not in axis_b]
transposed_topo_a = a.transpose(tuple(other_axis_a) + tuple(axis_a))
n_stat_axis_a = [a.shape[i] for i in axis_a]
n_other_axis_a = [a.shape[i] for i in other_axis_a]
flat_topo_a = transposed_topo_a.reshape(
np.prod(n_other_axis_a), np.prod(n_stat_axis_a)
)
transposed_topo_b = b.transpose(tuple(other_axis_b) + tuple(axis_b))
n_stat_axis_b = [b.shape[i] for i in axis_b]
n_other_axis_b = [b.shape[i] for i in other_axis_b]
flat_topo_b = transposed_topo_b.reshape(
np.prod(n_other_axis_b), np.prod(n_stat_axis_b)
)
assert np.array_equal(n_stat_axis_a, n_stat_axis_b)
stat_result = stat_fn(flat_topo_a, flat_topo_b)
topo_result = stat_result.reshape(
tuple(n_other_axis_a) + tuple(n_other_axis_b)
)
return topo_result
class FuncAndArgs(object):
"""Container for a function and its arguments.
Useful in case you want to pass a function and its arguments
to another function without creating a new class.
You can call the new instance either with the apply method or
the ()-call operator:
>>> FuncAndArgs(max, 2,3).apply(4)
4
>>> FuncAndArgs(max, 2,3)(4)
4
>>> FuncAndArgs(sum, [3,4])(8)
15
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def apply(self, *other_args, **other_kwargs):
all_args = self.args + other_args
all_kwargs = self.kwargs.copy()
all_kwargs.update(other_kwargs)
return self.func(*all_args, **all_kwargs)
def __call__(self, *other_args, **other_kwargs):
return self.apply(*other_args, **other_kwargs)
def add_message_to_exception(exc, additional_message):
# give some more info...
# see http://www.ianbicking.org/blog/2007/09/re-raising-exceptions.html
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
arg0 += additional_message
exc.args = (arg0,) + args[1:]
def dict_compare(d1, d2):
"""From http://stackoverflow.com/a/18860653/1469195"""
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
def dict_equal(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
return (
intersect_keys == d2_keys and
intersect_keys == d1_keys and
len(modified) == 0
)
def dict_is_subset(d1, d2):
added, removed, modified, same = dict_compare(d1, d2)
return len(added) == 0 and len(modified) == 0
def merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
http://stackoverflow.com/a/26853961
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def touch_file(path):
# from http://stackoverflow.com/a/12654798/1469195
basedir = os.path.dirname(path)
if not os.path.exists(basedir):
os.makedirs(basedir)
with open(path, "a"):
os.utime(path, None)
def to_tuple(sequence_or_element, length=None):
if hasattr(sequence_or_element, "__len__"):
assert length is None
return tuple(sequence_or_element)
else:
if length is None:
return (sequence_or_element,)
else:
return (sequence_or_element,) * length
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def select_inverse_inds(arr, inds):
mask = np.ones(len(arr), dtype=bool)
mask[inds] = False
return arr[mask]
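# --- Illustrative sketch (not part of the original module) ---
# Correlates two made-up arrays of shape channels x trials x samples along the
# sample axis, yielding one correlation per pair of (channel, trial) entries.
def _example_topo_correlation():
    rng = np.random.RandomState(0)
    a = rng.randn(3, 5, 100)   # 3 channels, 5 trials, 100 samples
    b = rng.randn(4, 5, 100)   # 4 channels, 5 trials, 100 samples
    result = wrap_reshape_apply_fn(corr, a, b, axis_a=2, axis_b=2)
    return result.shape        # -> (3, 5, 4, 5)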
|
the-stack_0_9728 | #! /usr/bin/env python3
from __future__ import print_function
import sys
import os
import re
from ROOT import *
import MultipleCompare as MultipleCompare
__author__ = "Lars Perchalla ([email protected])"
__doc__ = """Script to execute multiple plotting commands via MultipleCompare.py. Switch between massiveMode producing a set of plots comparing each one by one, and defaultMode producing a smaller set of default plot combinations by adding the commandline option massiveMode:\n\n
Usage: SteerMultipleCompare.py -T testFile -R refFile [options] [search strings that you want to apply '*' is supported as special character]
see MultiCompare.py for details
"""
def StripPath(name):
path = ''
plot = ''
matches = re.match(r'(.*)\/(.*)$', name)
if matches:
path = matches.group(1)
plot = matches.group(2)
return [path, plot]
def CreateDirectory(dir,addToExisting=False):
if os.path.exists(dir) and not addToExisting:
print("Output directory %s already exists! OK to overwrite?" % dir)
while True:
            choice = input("Please enter [y/n] ")
            if (choice == 'y'):
                break
            elif (choice == 'n'):
print(" ...exiting.")
sys.exit()
if not os.path.exists(dir):
os.makedirs(dir)
def CreateBaseDirectory(options):
if options.out == 'MultipleCompare.png' or options.out.find('.')!=-1:
#default case, so no directory was given
#or a filename was given
outputDirName = 'MultipleCompareOutput'
else:
outputDirName = options.out
outputDir = os.path.join(os.getcwd(), outputDirName)
CreateDirectory(outputDir)
return outputDir
def CreateSubDirectory(basedir, path):
outputDir = os.path.join(basedir, path)
CreateDirectory(outputDir,True)
def CleanArguments(argv, option):
#remove existing output arguments
while argv.count(option) > 0:
index = argv.index(option)
if index < len(argv)-1:
argv.pop(index+1)#drop the corresponding value
argv.pop(index)#drop the option itself
#execute Multicompare for each plot as a comparison one by one
#argv was modified to contain only one plot each
def plotOneByOne(argv, outputDir, histoList, histoSubNames, paths):
for hist, name, path in zip(histoList, histoSubNames, paths):
CreateSubDirectory(outputDir, path)
#now give modified arguments to MultipleCompare
tmpArgv = argv[:]
tmpArgv.append('-o')
tmpArgv.append(outputDir+'/'+path+'/'+name+'.png')
tmpArgv.append(hist)
MultipleCompare.main(tmpArgv)
def plotDefault(argv, outputDir, name, type, plots, addArgv=[]):
tmpArgv = argv[:]
tmpArgv.append('-o')
tmpArgv.append(outputDir+'/'+name+type)
tmpArgv.extend(addArgv)
tmpArgv.extend(plots)
MultipleCompare.main(tmpArgv)
#make some default plots grouping several histograms
def plotDefaults(argv, options, outputDir):
name = 'Validation_'
if options.testLabel != None:
name += options.testLabel+'_'
else:
name += options.test+'_vs_'
if options.refLabel != None:
name += options.refLabel+'_'
else:
name += options.ref+'_'
outputType = '.eps'
additionalArgv = []
if outputDir.find('QCD')!=-1:
additionalArgv.append('-f') #fakerate
plotDefault(argv, outputDir, name, 'LeptonRejectionEffphi'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effphi'], additionalArgv)
plotDefault(argv, outputDir, name, 'LeptonRejectionEffeta'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effeta'], additionalArgv)
plotDefault(argv, outputDir, name, 'LeptonRejectionEffpt'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effpt'], additionalArgv)
if outputDir.find('QCD')!=-1:
additionalArgv.append('--logScale')
plotDefault(argv, outputDir, name, 'Effphi'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effphi', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effphi'], additionalArgv)
plotDefault(argv, outputDir, name, 'Effeta'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effeta', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effeta'], additionalArgv)
plotDefault(argv, outputDir, name, 'Effpt'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effpt', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effpt'], additionalArgv)
plotDefault(argv, outputDir, name, 'pTRatio_allHadronic'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_allHadronic', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_allHadronic'])
plotDefault(argv, outputDir, name, 'pTRatio_oneProng1Pi0'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_oneProng1Pi0', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_oneProng1Pi0'])
plotDefault(argv, outputDir, name, 'pTRatio_threeProng0Pi0'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_threeProng0Pi0', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_threeProng0Pi0'])
plotDefault(argv, outputDir, name, 'Size_isolationPFChargedHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFChargedHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFChargedHadrCands'])
plotDefault(argv, outputDir, name, 'Size_isolationPFNeutrHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFNeutrHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFNeutrHadrCands'])
plotDefault(argv, outputDir, name, 'Size_isolationPFGammaCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFGammaCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFGammaCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFChargedHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFChargedHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFChargedHadrCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFNeutrHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFNeutrHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFNeutrHadrCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFGammaCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFGammaCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFGammaCands'])
def main(argv=None):
if argv is None:
argv = sys.argv
options, toPlot = MultipleCompare.LoadCommandlineOptions(argv)
gROOT.SetBatch()
testFile = TFile(options.test)
refFile = None
if options.ref != '':
refFile = TFile(options.ref)
plotList = []
MultipleCompare.MapDirStructure( testFile,'',plotList)
if len(plotList)<1:
print('\tError: Please specify at least one histogram. The following ones are available in the root file.')
print(plotList)
sys.exit()
histoList = []
histoSubNames = []
paths = []
massiveMode = False
for plot in toPlot:
#clean the arguments. toPlot contains the list of positional arguments leftover after parsing options
argv.remove(plot)
for path in plotList:
if MultipleCompare.Match(plot.lower(),path.lower()):
histoList.append(path)
strippedPath, strippedPlot = StripPath(path)
paths.append(strippedPath)
histoSubNames.append(strippedPlot)
#print histoSubNames[-1]
elif plot.find('massiveMode') != -1:
massiveMode = True
CleanArguments(argv,'--output')
CleanArguments(argv,'-o')
outputDir = CreateBaseDirectory(options)
if massiveMode:
print("Massive mode: scan all subdirs and make plots comparing each histogram one by one.")
plotOneByOne(argv, outputDir, histoList, histoSubNames, paths)
else:
print("Default mode: Make default plot combinations.")
plotDefaults(argv, options, outputDir)
#only execute main() if manually run
if __name__ == '__main__':
#main(*sys.argv[1:])
# the calls to sys.exit(n) inside main() all become return n.
sys.exit(main())
else:
print("This is ",__name__)
|
the-stack_0_9729 | from lib.interface.cores import c
from lib.interface.valida import leiaInt
def linha(s, x=60, cor=0):
print(f'{c(cor)}{s}'*x, f'{c(0)}')
def cabecalho(x, msg):
linha(x, cor=9)
print('{}{}{}'.format(c(11), msg.center(60), c(0)))
linha(x, cor=9)
def menu(lst):
    cabecalho('-', 'MAIN MENU')
op = 1
for val in lst:
print(f'{c(7)}[{op}] {val}{c(0)}')
op += 1
linha('-', cor=9)
    resp = leiaInt(f'{c(1)}Choose your option{c(0)} ')
return resp
|
the-stack_0_9731 | import json
from pprint import pprint
from configparser import ConfigParser
from pybea.client import BureauEconomicAnalysisClient
# Grab configuration values.
config = ConfigParser()
config.read("configs/config.ini")
API_KEY = config.get("alex_credentials", "API_KEY")
def save_response(name: str, data: dict) -> None:
"""Use this if you want to save the responses."""
with open(
file=f"samples/responses/{name}.jsonc",
mode="w+",
encoding="utf-8",
) as sample_file:
json.dump(obj=data, fp=sample_file, indent=4)
# Initialize the new Client.
bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
# Grab the Dataset List.
dataset_list = bea_client.get_dataset_list()
pprint(dataset_list)
# Grab the Paramters List.
parameters_set_list = bea_client.get_parameters_list(dataset_name="Regional")
pprint(parameters_set_list)
# Grab GDP for the Finance & Insurance industry (code "52"), for the years
# 2018 & 2019 on an annual basis ('A').
gdp_by_industry = bea_client.gdp_by_industry(
year=["2019", "2018"], industry="52", frequency="A"
)
pprint(gdp_by_industry)
# Grab National Product and Income Data.
national_income = bea_client.national_income_and_product_accounts(
table_name="T10101", frequency=["A", "Q"], year=["2011", "2012"]
)
pprint(national_income)
# Grab National Product and Income Data.
national_income_detail = bea_client.national_income_and_product_accounts_detail(
table_name="U20305", frequency=["A", "Q"], year=["2011", "2012"]
)
pprint(national_income_detail)
# Grab Current-Cost Net Stock of Private Fixed Assets, Equipment, Structures,
# and Intellectual Property Products by Type, for all years.
fixed_assets = bea_client.fixed_assets(table_name="FAAt201", year=["2011", "2012"])
pprint(fixed_assets)
# U. S. direct investment position in China and Asia for 2011 and 2012
investments = bea_client.direct_investments_and_multinational_enterprises(
direction_of_investment="outward",
classification="country",
series_id=["30"],
year=["2011", "2012"],
country=["650", "699"],
)
pprint(investments)
# Net income and sales for Brazilian affiliates of U. S. parent enterprises,
# all industries, 2011 and 2012.
investments = bea_client.activities_investments_and_multinational_enterprises(
direction_of_investment="outward",
classification="CountryByIndustry",
series_id=["4", "5"],
year=["2011", "2012"],
country=["202"],
ownership_level=False,
industry="ALL",
non_bank_affilates_only=False,
)
pprint(investments)
# Balance on goods with China for 2011 and 2012.
balance_on_goods = bea_client.international_transactions(
indicator=["BalGds"],
area_or_country=["China"],
year=["2011", "2012"],
frequency=["A"],
)
pprint(balance_on_goods)
# U.S. assets excluding financial derivatives; change in position
# attributable to price changes for all available years
us_assets = bea_client.international_investments_positions(
type_of_investment=["FinAssetsExclFinDeriv"],
component=["ChgPosPrice"],
year="ALL",
frequency=["A"],
)
pprint(us_assets)
# Data from Industry‐by‐Commodity Total Requirements, After Redefinitions
# (Sector Level) table for years 2010, 2011, and 2012.
input_output_data = bea_client.input_output_statstics(
table_id=["56"], year=["2010", "2011", "2012", "2013"]
)
pprint(input_output_data)
# Quarterly Value Added by Industry data for all industries for years 2012 and 2013.
underlying_gdp_by_industry = bea_client.underlying_gdp_by_industry(
industry="ALL", frequency=["A"], year=["2012", "2013"], table_id="ALL"
)
pprint(underlying_gdp_by_industry)
# Exports of telecommunications services by U.S. parents to their foreign affiliates for all years.
international_trade_services = bea_client.international_trade_services(
type_of_service="Telecom",
trade_direction=["Exports"],
year="ALL",
affiliation=["USPARENTS"],
area_or_country="AllCountries",
)
pprint(international_trade_services)
save_response(
name="get_international_trade_services", data=international_trade_services
)
# Personal income for 2012 and 2013 for all counties.
regional_data = bea_client.regional(
table_name=["CAINC1"], line_code=1, geo_fips=["COUNTY"], year=["2012", "2013"]
)
pprint(regional_data)
save_response(name="get_regional_data", data=regional_data)
|
the-stack_0_9732 | from django.core import mail
from selenium.webdriver.common.keys import Keys
import re
from .base import FunctionalTest
TEST_EMAIL = '[email protected]'
SUBJECT = 'Your login link for Superlists'
class LoginTest(FunctionalTest):
def test_can_get_email_link_to_log_in(self):
        # Edith goes to the site and notices the "Log in" field
        # The field tells her to enter her email address, and she does
self.browser.get(self.live_server_url)
self.browser.find_element_by_name('email').send_keys(TEST_EMAIL)
self.browser.find_element_by_name('email').send_keys(Keys.ENTER)
        # A message informs her that the email has been sent to her
self.wait_for(lambda: self.assertIn(
'Check your email',
self.browser.find_element_by_tag_name('body').text
))
        # She checks her email and finds the message
email = mail.outbox[0]
self.assertIn(TEST_EMAIL, email.to)
self.assertEqual(email.subject, SUBJECT)
        # The email contains a URL
self.assertIn('Use this link to log in', email.body)
url_search = re.search(r'http://.+/.+$', email.body)
if not url_search:
self.fail(f'Could not find url in email body:\n{email.body}')
url = url_search.group(0)
self.assertIn(self.live_server_url, url)
        # She clicks the URL
self.browser.get(url)
        # She is logged in
self.wait_for(
lambda: self.browser.find_element_by_link_text('Log out')
)
navbar = self.browser.find_element_by_css_selector('.navbar')
self.assertIn(TEST_EMAIL, navbar.text) |
the-stack_0_9733 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import ast
import errno
import logging
import sqlite3
import pytz
import threading
import os
import re
from .basedb import DbDriver
from collections import defaultdict
from datetime import datetime
from math import ceil
from volttron.platform.agent import utils
from volttron.platform import jsonapi
from volttron.platform.agent.utils import fix_sqlite3_datetime
utils.setup_logging()
_log = logging.getLogger(__name__)
# Make sure sqlite3 datetime adapters are updated.
fix_sqlite3_datetime()
class SqlLiteFuncts(DbDriver):
"""
Implementation of SQLite3 database operation for
:py:class:`sqlhistorian.historian.SQLHistorian` and
:py:class:`sqlaggregator.aggregator.SQLAggregateHistorian`
For method details please refer to base class
:py:class:`volttron.platform.dbutils.basedb.DbDriver`
"""
def __init__(self, connect_params, table_names):
database = connect_params['database']
thread_name = threading.currentThread().getName()
_log.debug(
"initializing sqlitefuncts in thread {}".format(thread_name))
if database == ':memory:':
self.__database = database
else:
self.__database = os.path.expandvars(os.path.expanduser(database))
db_dir = os.path.dirname(self.__database)
# If the db does not exist create it in case we are started
# before the historian.
try:
if db_dir == '':
if utils.is_secure_mode():
data_dir = os.path.basename(os.getcwd()) + ".agent-data"
db_dir = os.path.join(os.getcwd(), data_dir)
else:
db_dir = './data'
self.__database = os.path.join(db_dir, self.__database)
os.makedirs(db_dir)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(db_dir):
raise
connect_params['database'] = self.__database
if 'detect_types' not in connect_params:
connect_params['detect_types'] = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
if 'timeout' not in connect_params.keys():
connect_params['timeout'] = 10
self.data_table = None
self.topics_table = None
self.meta_table = None
self.agg_topics_table = None
self.agg_meta_table = None
if table_names:
self.data_table = table_names['data_table']
self.topics_table = table_names['topics_table']
self.meta_table = table_names['meta_table']
self.agg_topics_table = table_names['agg_topics_table']
self.agg_meta_table = table_names['agg_meta_table']
_log.debug("In sqlitefuncts connect params {}".format(connect_params))
super(SqlLiteFuncts, self).__init__('sqlite3', **connect_params)
def setup_historian_tables(self):
result = self.select('''PRAGMA auto_vacuum''')
auto_vacuum = result[0][0]
if auto_vacuum != 1:
_log.info("auto_vacuum set to 0 (None), updating to 1 (full).")
_log.info("VACCUUMing DB to cause new auto_vacuum setting to take effect. "
"This could be slow on a large database.")
self.select('''PRAGMA auto_vacuum=1''')
self.select('''VACUUM;''')
rows = self.select(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.data_table}';")
if rows:
_log.debug("Tables already exists")
rows = self.select(f"PRAGMA table_info({self.topics_table})")
for row in rows:
if row[1] == "metadata":
_log.debug("Existing topics table contains metadata column")
self.meta_table = self.topics_table
else:
self.meta_table = self.topics_table
self.execute_stmt(
'''CREATE TABLE IF NOT EXISTS ''' + self.data_table +
''' (ts timestamp NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
UNIQUE(topic_id, ts))''', commit=False)
self.execute_stmt(
'''CREATE INDEX IF NOT EXISTS data_idx
ON ''' + self.data_table + ''' (ts ASC)''', commit=False)
self.execute_stmt(
'''CREATE TABLE IF NOT EXISTS ''' + self.topics_table +
''' (topic_id INTEGER PRIMARY KEY,
topic_name TEXT NOT NULL,
metadata TEXT,
UNIQUE(topic_name))''', commit=False)
self.commit()
# metadata is in topics table
self.meta_table = self.topics_table
_log.debug("Created new schema. data and topics tables")
def setup_aggregate_historian_tables(self):
self.execute_stmt(
'CREATE TABLE IF NOT EXISTS ' + self.agg_topics_table +
' (agg_topic_id INTEGER PRIMARY KEY, \
agg_topic_name TEXT NOT NULL, \
agg_type TEXT NOT NULL, \
agg_time_period TEXT NOT NULL, \
UNIQUE(agg_topic_name, agg_type, agg_time_period));')
self.execute_stmt(
'CREATE TABLE IF NOT EXISTS ' + self.agg_meta_table +
'(agg_topic_id INTEGER NOT NULL PRIMARY KEY, \
metadata TEXT NOT NULL);')
_log.debug("Created aggregate topics and meta tables")
self.commit()
def query(self, topic_ids, id_name_map, start=None, end=None, agg_type=None, agg_period=None, skip=0, count=None,
order="FIRST_TO_LAST"):
"""
This function should return the results of a query in the form:
.. code-block:: python
{"values": [(timestamp1, value1), (timestamp2, value2), ...],
"metadata": {"key1": value1, "key2": value2, ...}}
metadata is not required (The caller will normalize this to {} for you)
@param topic_ids: topic_ids to query data for
@param id_name_map: dictionary containing topic_id:topic_name
@param start:
@param end:
@param agg_type:
@param agg_period:
@param skip:
@param count:
@param order:
"""
table_name = self.data_table
value_col = 'value_string'
if agg_type and agg_period:
table_name = agg_type + "_" + agg_period
value_col = 'agg_value'
query = '''SELECT topic_id, ts, ''' + value_col + '''
FROM ''' + table_name + '''
{where}
{order_by}
{limit}
{offset}'''
where_clauses = ["WHERE topic_id = ?"]
args = [topic_ids[0]]
        # The base historian converts naive timestamps to UTC, but if start and end carry explicit
        # timezone info they need to be converted to UTC here too, since sqlite3 only stores naive timestamps.
if start:
start = start.astimezone(pytz.UTC)
if end:
end = end.astimezone(pytz.UTC)
if start and end and start == end:
where_clauses.append("ts = ?")
args.append(start)
else:
if start:
where_clauses.append("ts >= ?")
args.append(start)
if end:
where_clauses.append("ts < ?")
args.append(end)
where_statement = ' AND '.join(where_clauses)
order_by = 'ORDER BY topic_id ASC, ts ASC'
if order == 'LAST_TO_FIRST':
order_by = ' ORDER BY topic_id DESC, ts DESC'
# can't have an offset without a limit
# -1 = no limit and allows the user to provide just an offset
if count is None:
count = -1
limit_statement = 'LIMIT ?'
args.append(count)
offset_statement = ''
if skip > 0:
offset_statement = 'OFFSET ?'
args.append(skip)
real_query = query.format(where=where_statement,
limit=limit_statement,
offset=offset_statement,
order_by=order_by)
_log.debug("Real Query: " + real_query)
_log.debug("args: " + str(args))
values = defaultdict(list)
start_t = datetime.utcnow()
for topic_id in topic_ids:
args[0] = topic_id
values[id_name_map[topic_id]] = []
cursor = self.select(real_query, args, fetch_all=False)
if cursor:
if value_col == 'agg_value':
for _id, ts, value in cursor:
values[id_name_map[topic_id]].append((utils.format_timestamp(ts), value))
cursor.close()
else:
for _id, ts, value in cursor:
values[id_name_map[topic_id]].append((utils.format_timestamp(ts), jsonapi.loads(value)))
cursor.close()
_log.debug("Time taken to load results from db:{}".format(datetime.utcnow()-start_t))
return values
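    # Hypothetical usage sketch (the instance name and ids below are
    # illustrative, not part of this module): assuming topic id 1 maps to
    # 'device1/in_temp' in the topics table, the newest ten readings could be
    # fetched with
    #   values = functs.query(topic_ids=[1],
    #                         id_name_map={1: 'device1/in_temp'},
    #                         count=10, order='LAST_TO_FIRST')
    #   # values == {'device1/in_temp': [(iso_timestamp_string, reading), ...]}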
def manage_db_size(self, history_limit_timestamp, storage_limit_gb):
"""
Manage database size.
:param history_limit_timestamp: remove all data older than this timestamp
:param storage_limit_gb: remove oldest data until database is smaller than this value.
"""
_log.debug("Managing store - timestamp limit: {} GB size limit: {}".format(
history_limit_timestamp, storage_limit_gb))
commit = False
if history_limit_timestamp is not None:
count = self.execute_stmt(
'''DELETE FROM ''' + self.data_table +
''' WHERE ts < ?''', (history_limit_timestamp,))
if count is not None and count > 0:
_log.debug("Deleted {} old items from historian. (TTL exceeded)".format(count))
commit = True
if storage_limit_gb is not None:
result = self.select('''PRAGMA page_size''')
page_size = result[0][0]
max_storage_bytes = storage_limit_gb * 1024 ** 3
max_pages = int(ceil(max_storage_bytes / page_size))
def page_count():
result = self.select("PRAGMA page_count")
return result[0][0]
while page_count() >= max_pages:
count = self.execute_stmt(
'''DELETE FROM ''' + self.data_table +
'''
WHERE ts IN
(SELECT ts FROM ''' + self.data_table +
'''
ORDER BY ts ASC LIMIT 100)''')
_log.debug("Deleted 100 old items from historian. (Managing store size)".format(count))
commit = True
if commit:
_log.debug("Committing changes for manage_db_size.")
self.commit()
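    # Sizing sketch (illustrative numbers, not taken from the source): with a
    # 4096-byte page size, storage_limit_gb=1 gives
    # max_pages = ceil(1 * 1024**3 / 4096) = 262144, and the loop above trims
    # 100 of the oldest rows at a time until PRAGMA page_count drops below that.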
def insert_meta_query(self):
return '''INSERT OR REPLACE INTO ''' + self.meta_table + \
''' values(?, ?)'''
def update_meta_query(self):
return '''UPDATE ''' + self.meta_table + ''' SET metadata = ?
WHERE topic_id = ?'''
def insert_data_query(self):
return '''INSERT OR REPLACE INTO ''' + self.data_table + \
''' values(?, ?, ?)'''
def insert_topic_query(self):
return '''INSERT INTO ''' + self.topics_table + \
''' (topic_name) values (?)'''
def insert_topic_and_meta_query(self):
return '''INSERT INTO ''' + self.topics_table + \
''' (topic_name, metadata) values (?, ?)'''
def update_topic_query(self):
return '''UPDATE ''' + self.topics_table + ''' SET topic_name = ?
WHERE topic_id = ?'''
def update_topic_and_meta_query(self):
return '''UPDATE ''' + self.topics_table + ''' SET topic_name = ?, metadata = ?
WHERE topic_id = ?'''
def get_aggregation_list(self):
return ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'TOTAL']
def insert_agg_topic_stmt(self):
return '''INSERT INTO ''' + self.agg_topics_table + '''
(agg_topic_name, agg_type, agg_time_period )
values (?, ?, ?)'''
def update_agg_topic_stmt(self):
return '''UPDATE ''' + self.agg_topics_table + ''' SET
agg_topic_name = ? WHERE agg_topic_id = ? '''
def replace_agg_meta_stmt(self):
return '''INSERT OR REPLACE INTO ''' + self.agg_meta_table + '''
values(?, ?)'''
def get_topic_map(self):
_log.debug("in get_topic_map")
q = "SELECT topic_id, topic_name FROM " + self.topics_table
rows = self.select(q, None)
_log.debug("loading topic map from db")
id_map = dict()
name_map = dict()
for t, n in rows:
id_map[n.lower()] = t
name_map[n.lower()] = n
return id_map, name_map
def get_topic_meta_map(self):
q = "SELECT topic_id, metadata FROM " + self.meta_table + ";"
rows = self.select(q, None)
_log.debug("loading metadata from db")
topic_meta_map = dict()
for id, meta in rows:
topic_meta_map[id] = jsonapi.loads(meta)
return topic_meta_map
def get_agg_topics(self):
try:
_log.debug("in get_agg_topics")
query = "SELECT agg_topic_name, agg_type, agg_time_period, metadata FROM " + self.agg_topics_table + \
" as t, " + self.agg_meta_table + " as m WHERE t.agg_topic_id = m.agg_topic_id "
rows = self.select(query, None)
topics = []
for row in rows:
_log.debug("rows from aggregate_t")
meta = ast.literal_eval(row[3])['configured_topics']
topics.append((row[0], row[1], row[2], meta))
return topics
except sqlite3.Error as e:
if e.args[0][0:13] == 'no such table':
_log.warning("No such table : {}".format(self.agg_topics_table))
return []
else:
raise
def get_agg_topic_map(self):
try:
_log.debug("in get_agg_topic_map")
q = "SELECT agg_topic_id, agg_topic_name, agg_type, agg_time_period FROM " + self.agg_topics_table
rows = self.select(q, None)
_log.debug("loading agg_topic map from db")
id_map = dict()
for row in rows:
_log.debug("rows from aggregate_t")
id_map[(row[1].lower(), row[2], row[3])] = row[0]
return id_map
except sqlite3.Error as e:
if e.args[0][0:13] == 'no such table':
_log.warning("No such table : {}".format(self.agg_topics_table))
return {}
else:
raise
@staticmethod
def regexp(expr, item):
_log.debug("item {} matched against expr {}".format(item, expr))
return re.search(expr, item, re.IGNORECASE) is not None
def set_cache(self, cache_size):
self.execute_stmt("PRAGMA CACHE_SIZE={}".format(cache_size))
def regex_select(self, query, args, fetch_all=True, cache_size=None):
conn = None
cursor = None
try:
conn = sqlite3.connect(self.__database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
if conn is None:
_log.error("Unable to connect to sqlite database {} ".format(self.__database))
return []
conn.create_function("REGEXP", 2, SqlLiteFuncts.regexp)
if cache_size:
conn.execute("PRAGMA CACHE_SIZE={}".format(cache_size))
_log.debug("REGEXP query {} ARGS: {}".format(query, args))
cursor = conn.cursor()
if args is not None:
cursor.execute(query, args)
else:
_log.debug("executing query")
cursor.execute(query)
if fetch_all:
rows = cursor.fetchall()
_log.debug("Regex returning {}".format(rows))
return rows
else:
return cursor, conn
except Exception as e:
_log.error("Exception querying database based on regular expression:{}".format(e.args))
finally:
if fetch_all:
if cursor:
cursor.close()
if conn:
conn.close()
def query_topics_by_pattern(self, topic_pattern):
id_map, name_map = self.get_topic_map()
_log.debug("Contents of topics table {}".format(list(id_map.keys())))
q = "SELECT topic_id, topic_name FROM " + self.topics_table + " WHERE topic_name REGEXP '" + topic_pattern + \
"';"
rows = self.regex_select(q, None)
_log.debug("loading topic map from db")
id_map = dict()
for t, n in rows:
id_map[n] = t
_log.debug("topics that matched the pattern {} : {}".format(topic_pattern, id_map))
return id_map
def create_aggregate_store(self, agg_type, period):
table_name = agg_type + '''_''' + period
stmt = "CREATE TABLE IF NOT EXISTS " + table_name + \
" (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, " \
"agg_value REAL NOT NULL, topics TEXT, " \
"UNIQUE(topic_id, ts)); "
self.execute_stmt(stmt)
stmt = "CREATE INDEX IF NOT EXISTS idx_" + table_name + " ON " + table_name + "(ts ASC);"
self.execute_stmt(stmt, commit=True)
return True
def insert_aggregate_stmt(self, table_name):
return '''INSERT OR REPLACE INTO ''' + table_name + ''' values(?, ?, ?, ?)'''
def collect_aggregate(self, topic_ids, agg_type, start=None, end=None):
"""
        This function should return the results of an aggregation query
@param topic_ids: list of single topics
@param agg_type: type of aggregation
@param start: start time
@param end: end time
@return: aggregate value, count of number of records over which
aggregation was computed
"""
if isinstance(agg_type, str):
if agg_type.upper() not in ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM']:
raise ValueError("Invalid aggregation type {}".format(agg_type))
query = '''SELECT ''' + agg_type + '''(value_string), count(value_string) FROM ''' + \
self.data_table + ''' {where}'''
where_clauses = ["WHERE topic_id = ?"]
args = [topic_ids[0]]
if len(topic_ids) > 1:
where_str = "WHERE topic_id IN ("
for _ in topic_ids:
where_str += "?, "
where_str = where_str[:-2] # strip last comma and space
where_str += ") "
where_clauses = [where_str]
args = topic_ids[:]
        # The base historian converts naive timestamps to UTC, but if start and end carry explicit
        # timezone info they need to be converted to UTC here too, since sqlite3 only stores naive timestamps.
if start:
start = start.astimezone(pytz.UTC)
if end:
end = end.astimezone(pytz.UTC)
if start and end and start == end:
where_clauses.append("ts = ?")
args.append(start)
else:
if start:
where_clauses.append("ts >= ?")
args.append(start)
if end:
where_clauses.append("ts < ?")
args.append(end)
where_statement = ' AND '.join(where_clauses)
real_query = query.format(where=where_statement)
_log.debug("Real Query: " + real_query)
_log.debug("args: " + str(args))
results = self.select(real_query, args)
if results:
_log.debug("results got {}, {}".format(results[0][0], results[0][1]))
return results[0][0], results[0][1]
else:
return 0, 0
@staticmethod
def get_tagging_query_from_ast(topic_tags_table, tup, tag_refs):
"""
Get a query condition syntax tree and generate sqlite query to query
topic names by tags. It calls the get_compound_query to parse the
abstract syntax tree tuples and then fixes the precedence
Example:
# User input query string :
.. code-block::
campus.geoPostalCode="20500" and equip and boiler and "equip_tag 7" > 4
# Example output sqlite query
.. code-block::
SELECT topic_prefix from test_topic_tags WHERE tag="campusRef"
and value IN(
SELECT topic_prefix from test_topic_tags WHERE tag="campus" and
value=1
INTERSECT
SELECT topic_prefix from test_topic_tags WHERE tag="geoPostalCode"
and value="20500"
)
INTERSECT
SELECT topic_prefix from test_tags WHERE tag="equip" and value=1
INTERSECT
SELECT topic_prefix from test_tags WHERE tag="boiler" and value=1
INTERSECT
SELECT topic_prefix from test_tags WHERE tag = "equip_tag 7" and
value > 4
:param topic_tags_table: table to query
:param tup: parsed query string (abstract syntax tree)
:param tag_refs: dictionary of ref tags and its parent tag
:return: sqlite query
:rtype str
"""
query = SqlLiteFuncts._get_compound_query(topic_tags_table, tup, tag_refs)
# Verify for parent tag finally. if present convert to subquery
# Process parent tag
# Convert
# WHERE tag='campusRef.geoPostalCode' AND value="20500"
# to
# where tag='campusRef' and value IN (
# SELECT topic_prefix FROM test_topic_tags
# WHERE tag='campus' AND value=1
# INTERSECT
# SELECT topic_prefix FROM test_topic_tags
# WHERE tag='geoPostalCode' and value="20500"
# )
parent = ""
search_pattern = r"WHERE\s+tag='(.+)\.(.+)'\s+AND\s+value\s+(.+)($|\n)"
results = re.findall(search_pattern, query, flags=re.IGNORECASE)
        # Example result: [('campusRef', 'tag1', '= 2', '\n'),
        #                  ('siteRef', 'tag2', '= 3 ', '\n')]
# Loop through and replace comparison operation with sub query
for result in results:
parent = tag_refs[result[0]]
replace_pattern = r"WHERE tag = '\1' AND value IN \n (" \
r"SELECT topic_prefix " \
r"FROM {table} WHERE tag = '{parent}' AND " \
r"value = 1\n " \
r"INTERSECT\n " \
r"SELECT topic_prefix FROM {table} WHERE " \
r"tag = '\2' " \
r"AND " \
r"value \3 \4)".format(table=topic_tags_table,
parent=parent)
query = re.sub(search_pattern, replace_pattern, query, count=1, flags=re.I)
_log.debug("Returning sqlite query condition {}".format(query))
return query
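    # Assumed shape of the parsed condition (inferred from _get_compound_query
    # below rather than taken from the tag parser itself): each node is a
    # 3-tuple (operator, left, right), with tag names and literal values at the
    # leaves, e.g. roughly
    #   ('and', ('=', 'campus.geoPostalCode', '20500'),
    #           ('>', 'equip_tag 7', 4))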
@staticmethod
def _get_compound_query(topic_tags_table, tup, tag_refs, root=True):
"""
Get a query condition syntax tree and generate sqlite query to query
topic names by tags
Example:
# User input query string :
campus.geoPostalCode="20500" and equip and boiler and "equip_tag 7" > 4
SELECT topic_prefix FROM test_topic_tags WHERE tag="campusRef"
and value IN(
SELECT topic_prefix FROM test_topic_tags WHERE tag="campus" AND
value=1
INTERSECT
SELECT topic_prefix FROM test_topic_tags WHERE tag="geoPostalCode"
AND value="20500"
)
INTERSECT
SELECT topic_prefix FROM test_tags WHERE tag="equip" AND value=1
INTERSECT
SELECT topic_prefix FROM test_tags WHERE tag="boiler" AND value=1
INTERSECT
SELECT topic_prefix FROM test_tags WHERE tag = "equip_tag 7" AND
value > 4
:param topic_tags_table: table to query
:param tup: parsed query string (abstract syntax tree)
:param tag_refs: dictionary of ref tags and its parent tag
:param root: Boolean to indicate if it is the top most tuple in the
abstract syntax tree.
:return: sqlite query
:rtype str
"""
# Instead of using sqlite LIKE operator we use python regular expression and sqlite REGEXP operator
reserved_words = {'and': 'INTERSECT', "or": 'UNION', 'not': 'NOT', 'like': 'REGEXP'}
prefix = 'SELECT topic_prefix FROM {} WHERE '.format(topic_tags_table)
if tup is None:
return tup
if not isinstance(tup[1], tuple):
left = repr(tup[1]) # quote the tag
else:
left = SqlLiteFuncts._get_compound_query(topic_tags_table, tup[1], tag_refs, False)
if not isinstance(tup[2], tuple):
if isinstance(tup[2],str):
right = repr(tup[2])
elif isinstance(tup[2], bool):
right = 1 if tup[2] else 0
else:
right = tup[2]
else:
right = SqlLiteFuncts._get_compound_query(topic_tags_table, tup[2], tag_refs, False)
assert isinstance(tup[0], str)
lower_tup0 = tup[0].lower()
operator = lower_tup0
if lower_tup0 in reserved_words:
operator = reserved_words[lower_tup0]
if operator == 'NOT':
query = SqlLiteFuncts._negate_condition(right, topic_tags_table)
elif operator == 'INTERSECT' or operator == 'UNION':
if root:
query = "{left}\n{operator}\n{right}".format(left=left, operator=operator, right=right)
else:
query = 'SELECT topic_prefix FROM ({left} \n{operator}\n{right})'.format(
left=left, operator=operator, right=right)
else:
query = "{prefix} tag={tag} AND value {operator} {value}".format(
prefix=prefix, tag=left, operator=operator, value=right)
return query
@staticmethod
def _negate_condition(condition, table_name):
"""
change NOT(bool_expr AND bool_expr) to NOT(bool_expr) OR NOT(bool_expr)
recursively. In sqlite syntax:
TO negate the following sql query:
SELECT * FROM
(SELECT * FROM
(SELECT topic_prefix FROM topic_tags WHERE tag='tag3' AND value > 1
INTERSECT
SELECT topic_prefix FROM topic_tags WHERE tag='tag2' AND value > 2)
UNION
SELECT topic_prefix FROM topic_tags WHERE tag='tag4' AND value < 2)
We have to change it to:
SELECT * FROM
(SELECT * FROM
(SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN
(SELECT topic_prefix FROM topic_tags WHERE tag='tag3' AND
value > 1)
UNION
SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN
(SELECT topic_prefix FROM topic_tags WHERE tag='tag2' AND
value > 2))
INTERSECT
SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN(
SELECT topic_prefix FROM topic_tags WHERE tag='tag4' AND
value < 2))
:param condition: select query that needs to be negated. It could be a
compound query.
:return: negated select query
:rtype str
"""
_log.debug("Query condition to negate: {}".format(condition))
# Change and to or and or to and
condition = condition.replace('INTERSECT\n', 'UNION_1\n')
condition = condition.replace('UNION\n', 'INTERSECT\n')
condition = condition.replace('UNION_1\n', 'UNION\n')
# Now negate all SELECT... value<operator><value> with
# SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN (SELECT....value<operator><value>)
search_pattern = r'(SELECT\s+topic_prefix\s+FROM\s+' + table_name + \
r'\s+WHERE\s+tag=\'.*\'\s+AND\s+value.*($|\n))'
replace_pattern = r'SELECT topic_prefix FROM ' + table_name + r' WHERE topic_prefix NOT IN (\1)\2'
c = re.search(search_pattern, condition)
condition = re.sub(search_pattern,
replace_pattern,
condition,
flags=re.I
)
_log.debug("Condition after negation: {}".format(condition))
return condition
if __name__ == '__main__':
con = {
"database": '/tmp/tmpgLzWr3/historian.sqlite'
}
tables_def = {
"table_prefix": "prefix",
"data_table": "data_table",
"topics_table": "topics_table",
"meta_table": "meta_table"
}
functs = SqlLiteFuncts(con, tables_def)
    functs.collect_aggregate(['device1/in_temp'], 'sum',
datetime.strptime('2016-06-05 22:47:02.417604+00:00', "%Y-%m-%d %H:%M:%S.%f+00:00"),
datetime.strptime('2016-06-05 22:49:02.417604+00:00', "%Y-%m-%d %H:%M:%S.%f+00:00"))
|
the-stack_0_9734 | import torch
from torch.autograd import Variable
from torch.autograd import Function
import numpy as np
import scipy.linalg
class MatrixSquareRoot(Function):
"""Square root of a positive definite matrix.
NOTE: matrix square root is not differentiable for matrices with
zero eigenvalues.
"""
@staticmethod
def forward(ctx, input):
itr_TH = 10 # number of iterations threshold
dim = input.shape[0]
norm = torch.norm(input)#.double())
#Y = input.double()/norm
Y = input/norm
        I = torch.eye(dim, dim, device=input.device, dtype=input.dtype)
        Z = torch.eye(dim, dim, device=input.device, dtype=input.dtype)
#print('Check: ', Y.type(), I.type(), Z.type())
for i in range(itr_TH):
T = 0.5*(3.0*I - Z.mm(Y))
Y = Y.mm(T)
Z = T.mm(Z)
sqrtm = Y*torch.sqrt(norm)
ctx.save_for_backward(sqrtm)
return sqrtm
@staticmethod
def backward(ctx, grad_output):
itr_TH = 10 # number of iterations threshold
grad_input = None
sqrtm, = ctx.saved_tensors
dim = sqrtm.shape[0]
norm = torch.norm(sqrtm)
A = sqrtm/norm
        I = torch.eye(dim, dim, device=sqrtm.device, dtype=sqrtm.dtype)
#Q = grad_output.double()/norm
Q = grad_output/norm
for i in range(itr_TH):
Q = 0.5*(Q.mm(3.0*I-A.mm(A))-A.t().mm(A.t().mm(Q)-Q.mm(A)))
A = 0.5*A.mm(3.0*I-A.mm(A))
grad_input = 0.5*Q
return grad_input
sqrtm = MatrixSquareRoot.apply
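# Hypothetical usage sketch (tensor names are illustrative): differentiate
# through the Newton-Schulz square root defined above.
#   k = torch.randn(5, 5)
#   pd = k.t().matmul(k) + 1e-3 * torch.eye(5)   # positive definite input
#   pd.requires_grad_(True)
#   s = sqrtm(pd)
#   s.sum().backward()   # pd.grad is filled in by MatrixSquareRoot.backward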
def original_main():
from torch.autograd import gradcheck
k = torch.randn(20, 10).double()
# Create a positive definite matrix
pd_mat = k.t().matmul(k)
pd_mat = Variable(pd_mat, requires_grad=True)
test = gradcheck(MatrixSquareRoot.apply, (pd_mat,))
print(test)
def single_main():
from torch.autograd import gradcheck
n = 1
A = torch.randn( 20, 10).double()
# Create a positive definite matrix
pd_mat = A.t().matmul(A)
pd_mat = Variable(pd_mat, requires_grad=True)
test = gradcheck(MatrixSquareRoot.apply, (pd_mat,))
print(test)
#sqrtm_scipy = np.zeros_like(A)
    print('pd_mat: ', pd_mat)
sqrtm_scipy = scipy.linalg.sqrtm(pd_mat.detach().numpy().astype(np.float_))
# for i in range(n):
# sqrtm_scipy[i] = sqrtm(pd_mat[i].detach().numpy())
sqrtm_torch = sqrtm(pd_mat)
print('sqrtm torch: ', sqrtm_torch)
print('scipy', sqrtm_scipy)
print('Difference: ', np.linalg.norm(sqrtm_scipy - sqrtm_torch.detach().numpy()))
def main():# batch
from torch.autograd import gradcheck
n = 2
A = torch.randn(n, 4, 5).double()
A.requires_grad = True
# Create a positive definite matrix
#pd_mat = A.t().matmul(A)
pd_mat = torch.matmul(A.transpose(-1, -2), A)
pd_mat = Variable(pd_mat, requires_grad=True)
    print('pd_mat shape: ', pd_mat.shape)
#test = gradcheck(MatrixSquareRoot.apply, (pd_mat,))
#print(test)
sqrtm_scipy = np.zeros_like(pd_mat.detach().numpy())
#sqrtm_scipy = scipy.linalg.sqrtm(pd_mat.detach().numpy().astype(np.float_))
for i in range(n):
sqrtm_scipy[i] = scipy.linalg.sqrtm(pd_mat[i].detach().numpy())
# batch implementation
sqrtm_torch = torch.zeros(pd_mat.shape)
for i in range(n):
sqrtm_torch[i] = sqrtm(pd_mat[i])
#sqrtm_torch = sqrtm(pd_mat)
print('sqrtm torch: ', sqrtm_torch)
print('scipy', sqrtm_scipy)
print('Difference: ', np.linalg.norm(sqrtm_scipy - sqrtm_torch.detach().numpy()))
if __name__ == '__main__':
main()
|
the-stack_0_9737 | from __future__ import absolute_import, print_function, division
import copy
import numpy as np
import logging
import pdb
import time
from six import iteritems
from six.moves import xrange
import sys
import theano
from theano import tensor, scalar, gof, config
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
LocalGroupDB,
SequenceDB, Optimizer, DB, toolbox, graph)
from theano.ifelse import IfElse
from theano.misc.ordered_set import OrderedSet
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scalar.basic_scipy import Erfinv, Erfcinv
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet import bn
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter
from theano.tensor.nnet.abstract_conv import (BaseAbstractConv,
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs)
import theano.tensor.nlinalg as nlinalg
import theano.tensor.signal.pool as pool
import theano.tensor.slinalg as slinalg
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined, move_to_gpu)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous, gpu_contiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemm_no_inplace, gpugemm_inplace,
gpugemmbatch_no_inplace,
gpugemv_no_inplace, gpugemv_inplace,
GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,
GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights)
from .pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad, GpuMaxPoolRop,
GpuDownsampleFactorMaxGradGrad)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer,
gpu_sparse_block_outer_inplace,
gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)
from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,
gpu_crossentropy_softmax_argmax_1hot_with_bias,
gpu_softmax_with_bias, gpu_softmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY, gpu_erfinv, gpu_erfcinv,
max_inputs_to_GpuElemwise)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge, pad_dims, unpad_dims
from .reduction import GpuMaxAndArgmax
from .linalg import (GpuCusolverSolve, MATRIX_STRUCTURES_SOLVE, GpuCholesky,
cusolver_available, GpuMagmaMatrixInverse, GpuMagmaSVD)
_logger = logging.getLogger("theano.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
# Not used for an EquilibriumOptimizer. It has the "tracks" that we need for GraphToGPUDB.
gpu_optimizer2 = EquilibriumDB()
class GraphToGPUDB(DB):
"""
Retrieves the list local optimizers based on the optimizer flag's value
from EquilibriumOptimizer by calling the method query.
"""
def query(self, *tags, **kwtags):
opt = gpu_optimizer2.query(*tags, **kwtags)
return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)
gpu_seqopt = SequenceDB()
gpu_seqopt.register('gpuarray_graph_optimization', GraphToGPUDB(), -0.5,
'fast_compile', 'fast_run', 'gpuarray')
gpu_seqopt.register('gpuarray_local_optimizations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'gpuarray', 'gpuarray_local_optimiziations')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
def register_opt2(tracks, *tags, **kwargs):
'''
Decorator for the new GraphToGPU optimizer.
Takes an extra parameter(Op) compared to register_opt decorator.
Parameters
----------
    tracks : list of Op classes or Op instances, or None
The Node's Op to which optimization is being applied.
tags : String
The optimization tag to which the optimizer will be registered.
'''
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
if isinstance(local_opt, theano.gof.DB):
opt = local_opt
else:
opt = theano.gof.local_optimizer(tracks)(local_opt)
gpu_optimizer2.register(name, opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
optdb.register(
name, TopoOptimizer(
local_opt, failure_callback=TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpua_constant_folding')(
tensor.opt.constant_folding)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return x.transfer('cpu')
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if (i.owner and i.owner.op == host_from_gpu and
move_to_gpu(i)):
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != b'cuda') or
any(["complex" in getattr(i, 'dtype', "")
for i in node.inputs])):
return False
# tag the inputs with the context in case
# the context was derived from the outputs
for i in node.inputs:
i.tag.context_name = context_name
new_op = maker(node.op, context_name, node.inputs, node.outputs)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
return [safe_to_cpu(o) for o in
new_op(*node.inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [new_op.transfer('cpu')]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
# If all clients are outputs or transfers don't do anything.
if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
for cl in input.clients)):
continue
target = getattr(input.tag, 'target', None)
if target == 'cpu':
continue
if (isinstance(input.type, tensor.TensorType) and
not move_to_gpu(input)):
continue
try:
new_input = GpuFromHost(target)(input).transfer('cpu')
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ContextNotDefined:
if hasattr(input.tag, 'target'):
raise
# If there is no context tag and no default context
# then it stays on the CPU
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
class GraphToGPU(Optimizer):
"""
Transfer the graph as a whole to GPU instead of transfering node by node.
Parameters
----------
local_optimizers_all : List or SortedSet
The local optimizations to apply to a node.
local_optimizers_map : Dict
Dictionary object containing the mapping of Op to list of
LocalOptimizers.
"""
def __init__(self, local_optimizers_all, local_optimizers_map):
self.local_optimizers_all = local_optimizers_all
self.local_optimizers_map = local_optimizers_map
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
mapping = {}
time_opts = {}
node_created = {}
process_count = {}
t_topo = time.time()
topo = fgraph.toposort()
time_topo = time.time()
toposort_timing = time_topo - t_topo
# Building a new graph
# Iterating through inputs of graph
target = infer_context_name(*fgraph.inputs)
for i in fgraph.inputs:
if isinstance(i.type, tensor.TensorType) and move_to_gpu(i):
mapping[i] = i.transfer(getattr(i.tag, 'target', target))
else:
mapping[i] = i
for i in fgraph.variables:
if isinstance(i, theano.Constant):
mapping[i] = i
for node in topo:
for lopt in (self.local_optimizers_map.get(node.op, []) +
self.local_optimizers_map.get(type(node.op), []) +
self.local_optimizers_all):
process_count.setdefault(lopt, 0)
time_opts.setdefault(lopt, 0)
node_created.setdefault(lopt, 0)
for node in topo:
if isinstance(node.op, HostFromGpu):
mapping[node.outputs[0]] = mapping[node.inputs[0]]
continue
# Move only if any of the inputs are on the GPU.
move_to_GPU = False
context_name = None
for i in [mapping[i] for i in node.inputs]:
if isinstance(i.type, GpuArrayType):
context_name = i.type.context_name
move_to_GPU = True
break
if (not move_to_GPU and
isinstance(node.op, (theano.tensor.Alloc,
theano.tensor.AllocEmpty,
theano.tensor.basic.Eye))):
# If the Alloc[Empty] have a client that will be moved
# to the GPU, we should move the Alloc* on the GPU.
# We approximate this by supposing that if we have an
# optimization for one of the clients op, then we will
# move the client to the GPU.
for c, _ in node.outputs[0].clients:
if (c != 'output' and
(self.local_optimizers_map.get(c.op, []) +
self.local_optimizers_map.get(type(c.op), []))):
move_to_GPU = True
new_ops = None
if move_to_GPU and any(["complex" in getattr(i, 'dtype', "")
for i in node.inputs]):
move_to_GPU = False
# Apply the lifter
if move_to_GPU:
for lopt in (self.local_optimizers_map.get(node.op, []) +
self.local_optimizers_map.get(type(node.op), []) +
self.local_optimizers_all):
t_opt = time.time()
new_ops = lopt.transform(node.op, context_name,
[mapping[i] for i in node.inputs],
node.outputs)
t_opt2 = time.time()
time_opts[lopt] += t_opt2 - t_opt
if new_ops:
process_count[lopt] += 1
break
outputs = []
if isinstance(new_ops, theano.Op):
outputs = new_ops(*[mapping[i] for i in node.inputs], return_list=True)
elif not new_ops:
newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
outputs = newnode.outputs
elif isinstance(new_ops, (tuple, list)):
outputs = new_ops
elif isinstance(new_ops, theano.Variable):
outputs = [new_ops]
if new_ops:
node_created[lopt] += len(graph.ops([mapping[i] for i in node.inputs], outputs))
if any([getattr(old_o, 'dtype', None) != getattr(new_o, 'dtype', None)
for old_o, new_o in zip(outputs, node.outputs)]):
_logger.warning(
"The optimization %s returned bad dtype. Skipping it."
" Write to theano-dev mailing list about this." %
str(lopt))
newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
outputs = newnode.outputs
for new_o, old_o in zip(outputs, node.outputs):
assert len(outputs) == len(node.outputs)
mapping[old_o] = new_o
new_nodes = []
for o in fgraph.outputs:
new_o = mapping[o]
if new_o.type != o.type:
assert isinstance(o.type, tensor.TensorType)
assert isinstance(new_o.type, GpuArrayType)
# This condition is needed in the case one input is an
# output of the graph. Without this, it would
# introduce cycle as we don't replace correctly that
# case. It would also add extra transfer to/from the
# gpu.
if (new_o.owner and
isinstance(new_o.owner.op, GpuFromHost) and
new_o.owner.inputs[0].type == o.type):
new_o = new_o.owner.inputs[0]
else:
new_o = safe_to_cpu(new_o)
new_nodes.append(new_o)
fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),
reason=self.__class__.__name__)
return (self, toposort_timing, time_opts, node_created, process_count)
@staticmethod
def print_profile(stream, prof, level=0):
(opt, toposort_timing, time_opts, node_created, process_count) = prof
blanc = (' ' * level)
print(blanc, "GraphToGPUOptimizer", end=' ', file=stream)
print(blanc, getattr(opt, "name",
getattr(opt, "__name__", "")), file=stream)
print(blanc, " time io_toposort %.3fs" % toposort_timing, file=stream)
s = sum(time_opts.values())
print(blanc, "Total time taken by local optimizers %.3fs " % s, file=stream)
count_opt = []
not_used = []
not_used_time = 0
for o, count in iteritems(process_count):
if count > 0:
count_opt.append((time_opts[o], count,
node_created[o], o))
else:
not_used.append((time_opts[o], o))
not_used_time += time_opts[o]
if count_opt:
print(blanc,
' times - times applied - Node created - name:',
file=stream)
count_opt.sort()
for (t, count, n_created, o) in count_opt[::-1]:
print(blanc, ' %.3fs - %d - %d - %s' % (
t, count, n_created, o), file=stream)
print(blanc, ' %.3fs - in %d optimization that were not used (display only those with a runtime > 0)' % (
not_used_time, len(not_used)), file=stream)
not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
for (t, o) in not_used[::-1]:
if t > 0:
                # Skip opts with 0 time; they probably weren't even tried.
print(blanc + " ", ' %.3fs - %s' % (t, o), file=stream)
print(file=stream)
@staticmethod
def merge_profile(prof1, prof2):
# (opt, toposort_timing, time_opts, node_created, process_count) = prof1
local_optimizers = OrderedSet(prof1[0].local_optimizers_all).union(
prof2[0].local_optimizers_all)
def merge_dict(d1, d2):
"""
merge 2 dicts by adding the values.
"""
d = d1.copy()
for k, v in iteritems(d2):
if k in d:
d[k] += v
else:
d[k] = v
return d
local_optimizers_map = merge_dict(prof1[0].local_optimizers_map,
prof2[0].local_optimizers_map)
new_opt = GraphToGPU(local_optimizers, local_optimizers_map)
toposort_timing = prof1[1] + prof2[1]
time_opts = merge_dict(prof1[2], prof2[2])
node_created = merge_dict(prof1[3], prof2[3])
process_count = merge_dict(prof1[4], prof2[4])
return (new_opt,
toposort_timing,
time_opts,
node_created,
process_count)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print("%s%s (%i)" % (
(' ' * level), self.__class__.__name__, id(self)), file=stream)
if depth != 0:
map_values = []
for opts in self.local_optimizers_map.values():
map_values += opts
for opt in self.local_optimizers_all + map_values:
opt.print_summary(stream, level=(level + 2), depth=(depth - 1))
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (isinstance(node.op, HostFromGpu) and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [n2.inputs[0].transfer('cpu')]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [as_gpuarray_variable(n2.inputs[0],
node.op.context_name)]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
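# Illustrative rewrites performed by local_cut_gpu_transfers (informal sketch,
# pseudo-graphs rather than real Theano calls):
#     GpuFromHost(ctx)(HostFromGpu()(x_on_ctx))    -> x_on_ctx
#     GpuFromHost(ctxB)(HostFromGpu()(x_on_ctxA))  -> GpuToGpu(ctxB)(x_on_ctxA)
#     HostFromGpu()(GpuFromHost(ctx)(x_cpu))       -> x_cpu
#     GpuToGpu(ctx)(x_on_ctx)                      -> x_on_ctx
# The goal is to remove redundant round trips between the host and the
# device(s).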
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpua_alloc2(node):
"""
Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ContextNotDefined:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
isinstance(c.op, tensor.Join) and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [GpuAlloc(None)(*node.inputs).transfer('cpu')]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
@register_opt2([tensor.Alloc], 'fast_compile')
def local_gpuaalloc(op, context_name, inputs, outputs):
return GpuAlloc(context_name)(*inputs)
@register_opt('fast_compile')
@op_lifter([tensor.AllocEmpty])
@register_opt2([tensor.AllocEmpty], 'fast_compile')
def local_gpua_alloc_empty(op, context_name, inputs, outputs):
# We use _props_dict() to make sure that the GPU op know all the
# CPU op props.
return GpuAllocEmpty(context_name=context_name, **op._props_dict())(*inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(np.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
# Don't register by default.
@gof.local_optimizer([GpuAllocEmpty])
def local_gpua_alloc_empty_to_zeros(node):
if isinstance(node.op, GpuAllocEmpty):
context_name = infer_context_name(*node.inputs)
z = np.asarray(0, dtype=node.outputs[0].dtype)
return [GpuAlloc(context_name)(as_gpuarray_variable(z, context_name),
*node.inputs)]
optdb.register('local_gpua_alloc_empty_to_zeros',
theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),
# After move to gpu and merge2, before inplace.
49.3,
'alloc_empty_to_zeros',)
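# Illustrative effect when this optimizer is enabled (sketch, not executed):
#     GpuAllocEmpty(dtype, ctx)(n, m)  ->  GpuAlloc(ctx)(0, n, m)
# i.e. uninitialized buffers become zero-filled ones, trading a little speed
# for deterministic memory contents; that cost is why it is not registered by
# default.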
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.extra_ops.CpuContiguous])
@register_opt2([tensor.extra_ops.CpuContiguous], 'fast_compile')
def local_gpua_contiguous(op, context_name, inputs, outputs):
return gpu_contiguous
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
@register_opt2([tensor.Reshape], 'fast_compile')
def local_gpua_reshape(op, context_name, inputs, outputs):
res = GpuReshape(op.ndim)
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
@register_opt2([tensor.Rebroadcast], 'fast_compile')
def local_gpua_rebroadcast(op, context_name, inputs, outputs):
return op(as_gpuarray_variable(inputs[0], context_name))
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
@register_opt2([tensor.Flatten], 'fast_compile')
def local_gpua_flatten(op, context_name, inputs, outputs):
shp = []
if op.outdim != 1:
shp = [inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim)
o = res(inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
@register_opt2([tensor.Elemwise], 'fast_compile')
def local_gpua_elemwise(op, context_name, inputs, outputs):
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(outputs) > 1:
return
have_cuda = False
have_opencl = False
if inputs and isinstance(inputs[0].type, GpuArrayType):
kind = inputs[0].type.context.kind
if kind.startswith(b'opencl'):
have_opencl = True
elif kind.startswith(b'cuda'):
have_cuda = True
convert = {Erfinv: gpu_erfinv,
Erfcinv: gpu_erfcinv}
if scal_op.__class__ in convert:
scal_op = convert[scal_op.__class__]
if have_opencl:
_logger.warning(
'Function "%s" is not supported with OpenCL. Use "device=cuda" instead.' %
scal_op)
if not have_cuda:
return None
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
# inputs and or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
else:
new_inputs.append(as_gpuarray_variable(inp, context_name))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
return [gpu_output]
elif op.scalar_op in (scalar.add, scalar.mul):
try:
return [split_inputs(inputs, max_inputs_to_GpuElemwise(outputs), res)]
except ValueError:
return False
else:
return res
def split_inputs(inputs, max_nb_inputs, op):
"""
For some ops like add and mul, a large number of inputs can make nvcc fail
    compilation of our current code. We don't want nodes in the graph that
    can't execute, as this breaks DebugMode.
    This should not happen for other GpuElemwise ops, as only the fusion
    optimizer can generate ops with too many inputs, and it checks for that.
Parameters
----------
inputs: List of theano variables.
List of inputs to node.
max_nb_inputs: int
Maximum number of inputs the node can handle without
compilation fail.
op : Theano operator instance.
Operator that should be used to rebuild the computation graph with smaller
number of inputs per node.
"""
if max_nb_inputs <= 1 and len(inputs) > 1:
raise ValueError("Can not split nodes because inputs' dimensionality and/or"
" number of outputs is too large")
while len(inputs) > max_nb_inputs:
inner_ops = []
for i in range(0, len(inputs), max_nb_inputs):
inner_ops.append(op(*inputs[i: i + max_nb_inputs]))
inputs = inner_ops
return op(*inputs)
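# Illustrative sketch of the splitting above (assuming a maximum of 3 inputs
# per node; not executed):
#     split_inputs([x1, x2, x3, x4, x5], 3, gpu_add)
# first builds the inner nodes gpu_add(x1, x2, x3) and gpu_add(x4, x5), then
# returns gpu_add(inner1, inner2), so every node stays within the input limit.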
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
# 48.5 move to gpu
# 48.6 specialize
# 49 cpu fusion
# 49.5 add destroy handler
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 49,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.InplaceElemwiseOptimizer(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
register_opt(tensor.opt.local_useless_elemwise)
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
@register_opt2([tensor.DimShuffle], 'fast_compile')
def local_gpua_dimshuffle(op, context_name, inputs, outputs):
return GpuDimShuffle(op.input_broadcastable,
op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape(op, context_name, inputs, outputs):
if isinstance(inputs[0].type, GpuArrayType):
return
return local_gpua_specifyShape_graph(op, context_name, inputs, outputs)
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape_graph(op, context_name, inputs, outputs):
inp = [as_gpuarray_variable(inputs[0], context_name)]
inp += inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(op, context_name, inputs, outputs):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(inputs[0].type, GpuArrayType):
return
return local_gpua_shape_graph(op, context_name, inputs, outputs)
@register_opt2([tensor.compile.ops.Shape], 'fast_compile')
def local_gpua_shape_graph(op, context_name, inputs, outputs):
return [as_gpuarray_variable(inputs[0], context_name).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, np.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
@register_opt2([tensor.printing.Print], 'fast_compile')
def local_gpua_print_op(op, context_name, inputs, outputs):
x, = inputs
gpu_x = as_gpuarray_variable(x, context_name=context_name)
new_op = op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
        # Go through the monitored variables, only transferring to the GPU
        # those for which the input comes from the GPU or whose output will
        # be transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
                # The input should be transferred to the gpu
new_inputs.append(as_gpuarray_variable(inp, context_name))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
        # Only continue the optimization if at least one input has been
        # transferred to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(new_op_outputs[i].transfer('cpu'))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([IfElse])
@register_opt2([IfElse], 'fast_compile')
def local_gpua_lazy_ifelse(op, context_name, inputs, outputs):
if op.gpu:
return
c = inputs[0]
inps = []
falses = []
    # ifelse needs corresponding true/false input variables to be of the same
    # type. But we can't rely on the inputs to respect that, as GraphToGPU
    # doesn't enforce it. So we need to take care of this here.
for v1, v2 in zip(inputs[1:1 + op.n_outs], inputs[1 + op.n_outs:]):
if ((isinstance(v1.type, tensor.TensorType) and move_to_gpu(v1)) or
isinstance(v1.type, GpuArrayType) or
isinstance(v2.type, GpuArrayType)):
inps.append(as_gpuarray_variable(v1, context_name))
falses.append(as_gpuarray_variable(v2, context_name))
else:
inps.append(v1)
falses.append(v2)
inps.extend(falses)
return IfElse(op.n_outs, gpu=True)(c, *inps, return_list=True)
@register_opt('fast_compile')
@op_lifter([tensor.Join])
@register_opt2([tensor.Join], 'fast_compile')
def local_gpua_join(op, context_name, inputs, outputs):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpua_join_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
@register_opt2([tensor.Split], 'fast_compile')
def local_gpua_split(op, context_name, inputs, outputs):
# TODO use props
return GpuSplit(op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(op, context_name, inputs, outputs):
x = inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in outputs[0].clients]):
return
else:
return [gpu_x.owner.op(outputs[0]).transfer('cpu')]
return GpuSubtensor(op.idx_list)
@register_opt2([tensor.Subtensor], 'fast_compile')
def local_gpua_subtensor_graph(op, context_name, inputs, outputs):
    # We need different code here, as the condition differs because the
    # inputs aren't the same.
x = inputs[0]
    # We don't want to move the subtensor to the GPU if the input is on
    # the CPU and the only client of the CPU node is this subtensor. This
    # allows a smaller transfer.
if (x.owner and isinstance(x.owner.op, GpuFromHost)):
cpu_x = x.owner.inputs[0]
# And it is a shared var or an input of the graph.
# and is used by only 1 node.
        # x is in the new graph, so we can't test its number of clients.
if not cpu_x.owner and len(cpu_x.clients) == 1:
c = outputs[0].clients
            # If the subtensor has only 1 client, do it on the CPU.
            # We let the other optimizations take care of moving the
            # next node or not.
if len(c) == 1:
return
return GpuSubtensor(op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
@register_opt2([tensor.IncSubtensor], 'fast_compile')
def local_gpua_inc_subtensor(op, context_name, inputs, outputs):
op = GpuIncSubtensor(op.idx_list, op.inplace,
op.set_instead_of_inc,
op.destroyhandler_tolerate_aliased)
ret = op(*inputs)
val = getattr(outputs[0].tag, 'nan_guard_mode_check', True)
ret.tag.nan_guard_mode_check = val
return ret
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
@register_opt2([tensor.AdvancedSubtensor1], 'fast_compile')
def local_gpua_advanced_subtensor1(op, context_name, inputs, outputs):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor])
@register_opt2([tensor.AdvancedSubtensor], 'fast_compile')
def local_gpua_advanced_subtensor(op, context_name, inputs, outputs):
return GpuAdvancedSubtensor()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
@register_opt2([tensor.AdvancedIncSubtensor1], 'fast_compile')
def local_gpua_advanced_incsubtensor(op, context_name, inputs, outputs):
context = get_context(context_name)
# This is disabled on non-cuda contexts
if context.kind != b'cuda':
return None
x, y, ilist = inputs
set_instead_of_inc = op.set_instead_of_inc
compute_capability = int(context.bin_id[-2])
if compute_capability >= 2 and x.ndim == 1 and y.ndim == 0:
x = x.dimshuffle(0, 'x')
y = y.dimshuffle('x', 'x')
ret = GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)
ret = GpuDimShuffle(ret.type.broadcastable, [0])(ret)
return ret
elif compute_capability < 2 or x.ndim != 2 or y.ndim != 2:
return GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)
else:
return GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)
@register_inplace()
@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])
def local_advincsub1_gpua_inplace(node):
if isinstance(node.op, (GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)):
if not node.op.inplace:
return [node.op.clone_inplace()(*node.inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
@register_opt2([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod], 'fast_compile')
def local_gpua_careduce(op, context_name, inputs, outputs):
if isinstance(op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == b'opencl':
op2 = GpuCAReduceCPY
if op.scalar_op not in [scalar.add, scalar.mul]:
                # We don't yet support all reductions with the cpy code.
return
elif ctx.kind == b'cuda':
op2 = GpuCAReduceCuda
else:
return False
x, = inputs
greduce = op2(
op.scalar_op, axis=op.axis,
dtype=getattr(op, 'dtype', outputs[0].dtype),
acc_dtype=getattr(op, 'acc_dtype', None))
gvar = greduce(x)
# We need to have the make node called, otherwise the mask can
# be None
if (op2 is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([
as_gpuarray_variable(x, context_name)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
if op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
new_in_shp = [shape_i(x, 0)]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= shape_i(x, i)
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(shape_i(x, i))
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op2(
op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(op, 'dtype', outputs[0].dtype),
acc_dtype=getattr(op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)
gvar = greduce(gpu_reshaped_x)
# We need to have the make node called, otherwise the mask can
# be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = greduce(gpu_reshaped_x)
if reduce_reshaped_x.ndim != outputs[0].ndim:
out_shp = []
for i in range(x.ndim):
if i not in op.axis:
out_shp.append(shape_i(x, i))
unreshaped_reduce = GpuReshape(len(out_shp))(reduce_reshaped_x,
tensor.stack(out_shp))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
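# Illustrative sketch of the reshape fallback above (assumed shapes, not
# executed): summing a (2, 3, 4) tensor over axes (0, 1) gives
#     reduce_mask = [1, 1, 0]  ->  new_mask = [1, 0], new_in_shp = [6, 4]
# so x is reshaped to (6, 4) and reduced over axis 0; the (4,)-shaped result
# already has the expected ndim, so no final GpuReshape is needed.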
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
@register_opt2([tensor.blas.Gemv], 'fast_compile')
def local_gpua_gemv(op, context_name, inputs, outputs):
if inputs[0].dtype == 'float16':
        # Use the gemm implementation, as cublas gemv doesn't support float16
return gpugemm_no_inplace(inputs[0][:, None],
inputs[1],
inputs[2],
inputs[3][:, None],
inputs[4]).dimshuffle(0)
if inputs[0].dtype not in ['float32', 'float64']:
return
if op.inplace:
return gpugemv_inplace
else:
return gpugemv_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
@register_opt2([tensor.blas.Gemm], 'fast_compile')
def local_gpua_gemm(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float16', 'float32', 'float64']:
return
if op.inplace:
return gpugemm_inplace
else:
return gpugemm_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.BatchedDot])
@register_opt2([tensor.blas.BatchedDot], 'fast_compile')
def local_gpua_gemmbatch(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float32', 'float64']:
return
a, b = inputs
# Since GpuGemmBatch only supports 3D inputs and output,
# we need to add broadcastable dims to the inputs, and drop
# them from outputs
output_dims = [0, 1, 2]
if a.ndim == 2:
a = GpuDimShuffle(a.broadcastable, (0, 'x', 1))(a)
del output_dims[1]
if b.ndim == 2:
b = GpuDimShuffle(b.broadcastable, (0, 1, 'x'))(b)
del output_dims[-1]
# In case of mismatched dtypes, we also have to upcast
out_dtype = outputs[0].dtype
if a.dtype != out_dtype or b.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
if a.dtype != out_dtype:
a = gpu_cast_op(a)
if b.dtype != out_dtype:
b = gpu_cast_op(b)
c = tensor.AllocEmpty(out_dtype)(a.shape[0], a.shape[1], b.shape[2])
out = gpugemmbatch_no_inplace(c, np.asarray(1.0, dtype=out_dtype),
a, b, np.asarray(0.0, dtype=out_dtype))
if len(output_dims) != 3:
out = GpuDimShuffle(out.broadcastable, output_dims)(out)
return out
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpua_gemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)
def local_gpua_gemmbatch_alpha_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemmbatch_output_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
@register_opt2([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer], 'fast_compile')
def local_gpua_ger(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float32', 'float64']:
return
return GpuGer(inplace=op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
@register_opt2([tensor.blas.Dot22], 'fast_compile')
def local_gpua_dot22(op, context_name, inputs, outputs):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22Scalar])
@register_opt2([tensor.blas.Dot22Scalar], 'fast_compile')
def local_gpua_dot22scalar(op, context_name, inputs, outputs):
x, y, a = inputs
x = as_gpuarray_variable(x, context_name)
y = as_gpuarray_variable(y, context_name)
z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])
return [gpugemm_no_inplace(z, a, x, y, 0)]
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
@register_opt2([tensor.basic.Eye], 'fast_compile')
def local_gpua_eye(op, context_name, inputs, outputs):
return GpuEye(dtype=op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
@register_opt2([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], 'fast_compile')
def local_gpua_crossentropysoftmaxargmax1hotwithbias(op, context_name, inputs, outputs):
return gpu_crossentropy_softmax_argmax_1hot_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
@register_opt2([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], 'fast_compile')
def local_gpua_crossentropysoftmax1hotwithbiasdx(op, context_name, inputs, outputs):
return gpu_crossentropy_softmax_1hot_with_bias_dx
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
@register_opt2([tensor.nnet.Softmax], 'fast_compile')
def local_gpua_softmax(op, context_name, inputs, outputs):
return gpu_softmax
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
@register_opt2([tensor.nnet.SoftmaxWithBias], 'fast_compile')
def local_gpua_softmaxwithbias(op, context_name, inputs, outputs):
return gpu_softmax_with_bias
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_gpua_assert(op, context_name, inputs, outputs):
if isinstance(inputs[0].type, GpuArrayType):
return
return local_gpua_assert_graph(op, context_name, inputs, outputs)
@register_opt2([theano.tensor.opt.Assert], 'fast_compile')
def local_gpua_assert_graph(op, context_name, inputs, outputs):
return [op(as_gpuarray_variable(inputs[0], context_name),
*inputs[1:])]
@register_opt('fast_compile')
@op_lifter([ConvOp])
@register_opt2([ConvOp], 'fast_compile')
def local_gpua_error_convop(op, context_name, inputs, outputs):
assert False, """
ConvOp does not work with the gpuarray backend.
Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.conv2d()
"""
@register_opt('fast_compile')
@op_lifter([SparseBlockGemv])
@register_opt2([SparseBlockGemv], 'fast_compile')
def local_gpua_sparseblockgemv(op, context_name, inputs, outputs):
if inputs[0].dtype == 'float16':
return
if op.inplace:
return gpu_sparse_block_gemv_inplace
else:
return gpu_sparse_block_gemv
@register_opt('fast_compile')
@op_lifter([SparseBlockOuter])
@register_opt2([SparseBlockOuter], 'fast_compile')
def local_gpua_sparseblockouter(op, context_name, inputs, outputs):
if inputs[0].dtype == 'float16':
return
if op.inplace:
return gpu_sparse_block_outer_inplace
else:
return gpu_sparse_block_outer
@register_inplace()
@local_optimizer([GpuSparseBlockGemv], inplace=True)
def local_inplace_sparseblockgemv(node):
if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace:
return [gpu_sparse_block_gemv_inplace(*node.inputs)]
@register_inplace()
@local_optimizer([GpuSparseBlockOuter], inplace=True)
def local_inplace_sparseblockouter(node):
if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:
return [GpuSparseBlockOuter(inplace=True)(*node.inputs)]
# Move to Gpu optimization
@local_optimizer([GpuFromHost,
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs])
def local_conv_gpu_conv(node):
"""
gpu_from_host(AbstractConv) -> AbstractConv(gpu_from_host)
AbstractConv(host_from_gpu) -> host_from_gpu(AbstractConv)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
BaseAbstractConv):
conv = host_input.owner.op
inps = list(host_input.owner.inputs)
ctx = infer_context_name(*inps)
inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
out = conv(*inps)
# out is on the GPU because both inputs are.
out = theano.tensor.patternbroadcast(out,
node.outputs[0].broadcastable)
return [out]
if isinstance(node.op, BaseAbstractConv):
# conv(host_from_gpu) -> host_from_gpu(gpu_conv)
inp1 = node.inputs[0]
inp2 = node.inputs[1]
if ((isinstance(inp1.type, GpuArrayType) and
isinstance(inp2.type, GpuArrayType))):
# Both inputs are already directly on the GPU, nothing to do
return
inp1_on_gpu = (isinstance(inp1.type, GpuArrayType) or
(inp1.owner and isinstance(inp1.owner.op, HostFromGpu)))
inp2_on_gpu = (isinstance(inp2.type, GpuArrayType) or
(inp2.owner and isinstance(inp2.owner.op, HostFromGpu)))
if inp1_on_gpu or inp2_on_gpu:
conv = node.op
inps = list(node.inputs)
ctx = infer_context_name(*inps)
inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
out = conv(*inps)
# out is on the GPU because both inputs are.
out = theano.tensor.patternbroadcast(
out,
node.outputs[0].broadcastable)
# If the original output was on CPU, we have to transfer it
if isinstance(node.outputs[0].type, tensor.TensorType):
return [tensor.as_tensor_variable(out)]
else:
return [out]
register_opt()(local_conv_gpu_conv)
# CorrMM opt
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm(node):
if not isinstance(node.op, AbstractConv2d):
return None
img, kern = node.inputs
if (not isinstance(img.type, GpuArrayType) or
not isinstance(kern.type, GpuArrayType)):
return None
ctx = infer_context_name(img, kern)
border_mode = node.op.border_mode
subsample = node.op.subsample
filter_dilation = node.op.filter_dilation
if ((border_mode == 'full') and (subsample == (1, 1))):
if not node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
# need to dimshuffle the kernel for full convolution
kern = kern.dimshuffle(1, 0, 2, 3)
# call GpuCorrMM_gradInputs
rval = GpuCorrMM_gradInputs('valid',
subsample,
filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(img))
else:
# need to flip the kernel if necessary
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
# By default use GpuCorrMM
rval = GpuCorrMM(border_mode,
subsample,
filter_dilation)(gpu_contiguous(img),
gpu_contiguous(kern))
# call GpuCorrMM_gradWeights if good
# (the latter is faster if batchsize * kernelHeight * kernelWidth
# is larger than inputChannels * outputHeight * outputWidth.
# GpuConv does not always store information on the batchsize and
# channels, though, so we only use what information we have.)
if ((subsample == (1, 1)) and (filter_dilation == (1, 1)) and
(node.op.imshp is not None) and
(None not in node.op.imshp[-2:]) and
(node.op.kshp is not None) and
(None not in node.op.kshp) and
border_mode != "half"):
# we know the kernel and output size
prod1 = node.op.kshp[0] * node.op.kshp[1]
prod2 = ((node.op.imshp[-2] - node.op.kshp[0] + 1) *
(node.op.imshp[-1] - node.op.kshp[1] + 1))
if (None not in node.op.imshp[:1]):
# we also know batchsize and input channels
prod1 *= node.op.imshp[0]
prod2 *= node.op.imshp[1]
# compare to decide
if prod1 > prod2:
rval = GpuCorrMM_gradWeights(border_mode,
subsample,
filter_dilation)(
gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
# (we need to wrap the result in as_gpuarray_variable,
# because we are not allowed to replace a GpuArray with
# a DimShuffle instance in a graph optimization)
rval = as_gpuarray_variable(
rval.dimshuffle(1, 0, 2, 3),
context_name=ctx)
return [rval]
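# Illustrative numbers for the heuristic above (assumed shapes, not executed):
# with imshp = (64, 3, 32, 32) and kshp = (5, 5),
#     prod1 = 5 * 5 * 64 = 1600   (kernel area * batch size)
#     prod2 = 28 * 28 * 3 = 2352  (output area * input channels)
# so prod1 <= prod2 and the default GpuCorrMM path is kept; a larger batch or
# kernel would flip the comparison towards GpuCorrMM_gradWeights.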
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm(node):
if not isinstance(node.op, AbstractConv3d):
return None
img, kern = node.inputs
if (not isinstance(img.type, GpuArrayType) or
not isinstance(kern.type, GpuArrayType)):
return None
ctx = infer_context_name(img, kern)
border_mode = node.op.border_mode
subsample = node.op.subsample
filter_dilation = node.op.filter_dilation
if ((border_mode == 'full') and (subsample == (1, 1, 1))):
if not node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
# need to dimshuffle the kernel for full convolution
kern = kern.dimshuffle(1, 0, 2, 3, 4)
# call GpuCorr3dMM_gradInputs
rval = GpuCorr3dMM_gradInputs('valid',
subsample,
filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(img))
else:
# need to flip the kernel if necessary
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
# By default use GpuCorr3dMM
rval = GpuCorr3dMM(border_mode,
subsample,
filter_dilation)(gpu_contiguous(img),
gpu_contiguous(kern))
# call GpuCorr3dMM_gradWeights if good
# (the latter is faster if batchsize * kernelHeight * kernelWidth * kernelDepth
# is larger than inputChannels * outputHeight * outputWidth * outputDepth.
# GpuConv does not always store information on the batchsize and
# channels, though, so we only use what information we have.)
if ((subsample == (1, 1, 1)) and (filter_dilation == (1, 1, 1)) and
(node.op.imshp is not None) and
(None not in node.op.imshp[-3:]) and
(node.op.kshp is not None) and
(None not in node.op.kshp) and
border_mode != "half"):
# we know the kernel and output size
prod1 = node.op.kshp[0] * node.op.kshp[1] * node.op.kshp[2]
prod2 = ((node.op.imshp[-3] - node.op.kshp[0] + 1) *
(node.op.imshp[-2] - node.op.kshp[1] + 1) *
(node.op.imshp[-1] - node.op.kshp[2] + 1))
if (None not in node.op.imshp[:1]):
# we also know batchsize and input channels
prod1 *= node.op.imshp[0]
prod2 *= node.op.imshp[1]
# compare to decide
if prod1 > prod2:
rval = GpuCorr3dMM_gradWeights(border_mode,
subsample,
filter_dilation)(
gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
# (we need to wrap the result in as_gpuarray_variable,
# because we are not allowed to replace a GpuArray with
# a DimShuffle instance in a graph optimization)
rval = as_gpuarray_variable(
rval.dimshuffle(1, 0, 2, 3, 4),
context_name=ctx)
return [rval]
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gradweights_gemm(node):
if not isinstance(node.op, AbstractConv2d_gradWeights):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
ctx = infer_context_name(img, topgrad)
rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(img), gpu_contiguous(topgrad), shape)
if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1]
rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
rval = as_gpuarray_variable(rval, context_name=ctx)
return [rval]
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gradweights_gemm(node):
if not isinstance(node.op, AbstractConv3d_gradWeights):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
ctx = infer_context_name(img, topgrad)
rval = GpuCorr3dMM_gradWeights(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(img), gpu_contiguous(topgrad), shape)
if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1, ::-1]
rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
rval = as_gpuarray_variable(rval, context_name=ctx)
return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm(node):
if not isinstance(node.op, AbstractConv2d_gradInputs):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
rval = GpuCorrMM_gradInputs(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
return [rval]
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm(node):
if not isinstance(node.op, AbstractConv3d_gradInputs):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
rval = GpuCorr3dMM_gradInputs(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
return [rval]
# This deals with any abstract convs that have a transfer somewhere
@register_opt('fast_compile', 'conv_dnn', 'cudnn')
@op_lifter([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs])
def local_gpua_abstractconv(op, context_name, inputs, outputs):
if isinstance(outputs[0].type, GpuArrayType):
# Don't handle this node here, it's already on the GPU.
return
return local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs)
@register_opt2([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs], 'fast_compile', 'conv_dnn', 'cudnn')
def local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs):
inps = list(inputs)
inps[0] = as_gpuarray_variable(inputs[0],
context_name=context_name)
inps[1] = as_gpuarray_variable(inputs[1],
context_name=context_name)
return [op(*inps)]
def local_gpu_pool(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
op = GpuPool(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
ret_padded = op(inp_padded, ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
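# Illustrative sketch of the reshape path above (assumed 3D input, not
# executed): pooling an input of shape (c, h, w) with nd = 2 requires a 4D
# tensor with 2 non-pooling dimensions, so pad_dims reshapes it to
# (1, c, h, w), GpuPool runs on that, and unpad_dims reshapes the result back
# so it matches the original (c, h_out, w_out) layout.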
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
lifter = op_lifter([pool.Pool])(local_gpu_pool)
pool_db.register("local_gpu_pool", lifter,
'gpuarray', 'fast_compile', 'fast_run',
position=1)
pool_db2.register("local_gpu_pool",
local_optimizer([pool.Pool])(local_gpu_pool),
'gpuarray', 'fast_compile', 'fast_run',
position=1)
register_opt('fast_compile', name='pool_db')(pool_db)
register_opt2([pool.Pool], 'fast_compile', name='pool_db2')(pool_db2)
def local_gpu_max_pool_grad(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, out, out_grad, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
op = GpuMaxPoolGrad(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, out, out_grad, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
out_padded = pad_dims(out, 2, nd)
out_grad_padded = pad_dims(out_grad, 2, nd)
ret_padded = op(inp_padded, out_padded, out_grad_padded,
ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
lifter = op_lifter([pool.MaxPoolGrad])(local_gpu_max_pool_grad)
pool_db.register("local_gpu_max_pool_grad", lifter,
'gpuarray', 'fast_compile', 'fast_run',
position=1)
pool_db2.register("local_gpu_max_pool_grad",
local_optimizer([pool.MaxPoolGrad])(local_gpu_max_pool_grad),
'gpuarray', 'fast_compile', 'fast_run',
position=1)
def local_gpu_average_pool_grad(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, out_grad, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
op = GpuAveragePoolGrad(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, out_grad, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
out_grad_padded = pad_dims(out_grad, 2, nd)
ret_padded = op(inp_padded, out_grad_padded,
ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
lifter = op_lifter([pool.AveragePoolGrad])(local_gpu_average_pool_grad)
pool_db.register("local_gpu_average_pool_grad", lifter,
'gpuarray', 'fast_compile', 'fast_run',
position=1)
pool_db2.register("local_gpu_average_pool_grad",
local_optimizer([pool.AveragePoolGrad])(local_gpu_average_pool_grad),
'gpuarray', 'fast_compile', 'fast_run',
position=1)
@register_opt()
@op_lifter([pool.DownsampleFactorMaxGradGrad])
@register_opt2([pool.DownsampleFactorMaxGradGrad])
def local_gpu_downsample_factor_max_grad_grad(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, out, out_grad, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
op = GpuDownsampleFactorMaxGradGrad(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, out, out_grad, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
out_padded = pad_dims(out, 2, nd)
out_grad_padded = pad_dims(out_grad, 2, nd)
ret_padded = op(inp_padded, out_padded, out_grad_padded,
ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
@register_opt()
@op_lifter([pool.MaxPoolRop])
@register_opt2([pool.MaxPoolRop])
def local_gpu_max_pool_rop(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, eval_inp, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
eval_inp = gpu_contiguous(as_gpuarray_variable(eval_inp, ctx_name))
op = GpuMaxPoolRop(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, eval_inp, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
eval_inp_padded = pad_dims(eval_inp, 2, nd)
ret_padded = op(inp_padded, eval_inp_padded, ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
Merge some GpuCAReduceCuda and GPUElemwise.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The Op supports all scalar ops with 1 input. We don't
            # automatically add more cases, as some of them (like
            # trigonometric operations with certain reduction patterns)
            # would probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
axis=op.axis,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
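# Illustrative effect of the merge above (sketch, not executed): a graph
# computing
#     GpuCAReduceCuda(add)(GpuElemwise(sqr)(x))    # i.e. sum(x ** 2)
# becomes a single GpuCAReduceCuda(add, pre_scalar_op=sqr)(x), so the squaring
# happens inside the reduction kernel instead of materialising x ** 2 first.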
@local_optimizer(None)
def local_assert_no_cpu_op(node):
if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
for var in node.inputs]) and
any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
for var in node.outputs])):
if config.assert_no_cpu_op == "warn":
_logger.warning(("CPU Op %s is detected in the computation "
"graph") % node)
elif config.assert_no_cpu_op == "raise":
raise AssertionError("The Op %s is on CPU." % node)
elif config.assert_no_cpu_op == "pdb":
pdb.set_trace()
# Register the local_assert_no_cpu_op:
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
name='assert_no_cpu_op')
# 49.2 is after device specialization & fusion optimizations for last transfers
optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,
'assert_no_cpu_op')
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by the gradient or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
    A different interface to clone that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
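# Illustrative usage sketch (assumed variables, not executed):
#     ins, outs = gpu_reconstruct_graph(scan_ins, scan_outs, tag='_gpua')
# returns fresh inputs named like the originals plus '_gpua' (built with
# gpu_safe_new) and outputs cloned to depend on those fresh inputs, so the
# inner scan graph can be rebuilt without touching the original graph.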
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
@register_opt2([scan_op.Scan], 'fast_compile')
def local_gpua_scan_to_gpua(op, context_name, inputs, outputs):
info = copy.deepcopy(op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [inputs[0]]
e = (1 +
op.n_seqs +
op.n_mit_mot +
op.n_mit_sot +
op.n_sit_sot +
op.n_shared_outs)
nw_ins += [safe_to_gpu(x, context_name) for x in inputs[1:e]]
b = e
e = e + op.n_nit_sot
nw_ins += inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs[:-1]]
scan_outs += [op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
# Add optimization : maxandargmax (CPU -> GPU)
@register_opt('fast_compile')
@op_lifter([tensor.MaxAndArgmax])
@register_opt2([tensor.MaxAndArgmax], 'fast_compile')
def local_gpu_maxandargmax(op, context_name, inputs, outputs):
op = GpuMaxAndArgmax(op.get_params(None))
if inputs[0].dtype == "float16":
# For now it is better to copy/cast on the GPU then transfer to the CPU
casted_inputs = inputs[0].astype('float32')
ret = op(casted_inputs)
return [ret[0].astype('float16'), ret[1]]
return op
# solve
@register_opt('fast_compile')
@op_lifter([slinalg.Solve])
@register_opt2([theano.tensor.slinalg.Solve], 'fast_compile')
def local_gpu_solve(op, context_name, inputs, outputs):
if not cusolver_available:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
if op.A_structure not in MATRIX_STRUCTURES_SOLVE:
return
op = GpuCusolverSolve(A_structure=op.A_structure)
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32'),
inputs[1].astype('float32')).astype('float16')
return op
@register_inplace()
@local_optimizer([GpuCusolverSolve], inplace=True)
def local_inplace_gpu_solve(node):
if isinstance(node.op, GpuCusolverSolve) and not node.op.inplace:
return [GpuCusolverSolve(A_structure=node.op.A_structure, trans=node.op.trans,
inplace=True)(*node.inputs)]
# Cholesky decomposition
@register_opt('fast_compile')
@op_lifter([slinalg.Cholesky])
@register_opt2([theano.tensor.slinalg.Cholesky], 'fast_compile')
def local_gpu_cholesky(op, context_name, inputs, outputs):
if not cusolver_available:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
op = GpuCholesky(lower=op.lower, inplace=op.destructive)
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32')).astype('float16')
return op
@register_inplace()
@local_optimizer([GpuCholesky], inplace=True)
def local_inplace_cholesky(node):
if isinstance(node.op, GpuCholesky) and not node.op.inplace:
return [GpuCholesky(lower=node.op.lower, inplace=True)(*node.inputs)]
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.MatrixInverse])
@register_opt2([theano.tensor.nlinalg.MatrixInverse], 'magma', 'fast_compile')
def local_gpu_matrix_inverse(op, context_name, inputs, outputs):
if not config.magma.enabled:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
op = GpuMagmaMatrixInverse()
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32')).astype('float16')
return op
@register_inplace()
@local_optimizer([GpuMagmaMatrixInverse])
def local_inplace_matrix_inverse_inplace(node):
if isinstance(node.op, GpuMagmaMatrixInverse):
if not node.op.inplace:
return [node.op.clone_inplace()(*node.inputs)]
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.SVD])
@register_opt2([theano.tensor.nlinalg.SVD], 'magma', 'fast_compile')
def local_gpu_svd(op, context_name, inputs, outputs):
if not config.magma.enabled:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
op = GpuMagmaSVD(full_matrices=op.full_matrices,
compute_uv=op.compute_uv)
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32')).astype('float16')
return op
# Do not register in fast_run or fast_compile.
# It will be added to fast_run if the GPU is enabled.
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'inplace',
'scan')
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = theano.gof.optdb.LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt('fast_compile')(abstractconv_groupopt)
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstractconv_cudnn, local_abstractconv_gw_cudnn,
local_abstractconv_gi_cudnn) # noqa: 402
abstractconv_groupopt.register('local_abstractconv_dnn',
local_abstractconv_cudnn, 20,
'conv_dnn',
'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gw_dnn',
local_abstractconv_gw_cudnn, 20,
'conv_dnn',
'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gi_dnn',
local_abstractconv_gi_cudnn, 20,
'conv_dnn',
'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
# The GEMM-based convolution comes last to catch all remaining cases.
# It can be disabled by excluding 'conv_gemm'.
abstractconv_groupopt.register('local_abstractconv_gemm', local_abstractconv_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gemm', local_abstractconv3d_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradweights_gemm',
local_abstractconv_gradweights_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradweights_gemm',
local_abstractconv3d_gradweights_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradinputs',
local_abstractconv_gradinputs_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradinputs',
local_abstractconv3d_gradinputs_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
# Register cuDNN batch normalization implementation
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstract_batch_norm_train_cudnn,
local_abstract_batch_norm_train_grad_cudnn,
local_abstract_batch_norm_inference_cudnn) # noqa: 402
abstract_batch_norm_groupopt = theano.gof.optdb.LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
register_opt('fast_compile')(abstract_batch_norm_groupopt)
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(
local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
register_opt('fast_compile', name='abstract_batch_norm_db')(
abstract_batch_norm_db)
register_opt2([bn.AbstractBatchNormTrain,
bn.AbstractBatchNormTrainGrad,
bn.AbstractBatchNormInference],
'fast_compile', name='abstract_batch_norm_db2')(
abstract_batch_norm_db2)
for op, fct, cpu in [(bn.AbstractBatchNormTrain,
local_abstract_batch_norm_train_cudnn,
bn.local_abstract_batch_norm_train),
(bn.AbstractBatchNormTrainGrad,
local_abstract_batch_norm_train_grad_cudnn,
bn.local_abstract_batch_norm_train_grad),
(bn.AbstractBatchNormInference,
local_abstract_batch_norm_inference_cudnn,
bn.local_abstract_batch_norm_inference)]:
lifter = op_lifter([op])(fct)
abstract_batch_norm_db.register(fct.__name__,
lifter,
'gpuarray', 'fast_compile', 'fast_run',
'cudnn', 'batchnorm_dnn',
position=1)
abstract_batch_norm_db2.register(fct.__name__,
local_optimizer([op])(fct),
'gpuarray', 'fast_compile', 'fast_run',
'cudnn', 'batchnorm_dnn',
position=1)
    # cpu is a normal optimization. We can't register it in
    # GraphToGPU. So for now, only add it to the slower EQ phase. If
    # there is no cuDNN, we still want to move it to the GPU now with
    # a Theano graph so as to have this graph on the GPU.
abstract_batch_norm_db.register(cpu.__name__, cpu,
'gpuarray', 'fast_compile', 'fast_run',
position='last')
|
the-stack_0_9740 | import pickle
import joblib
import pytest
import numpy as np
import scipy.sparse as sp
from unittest.mock import Mock
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone, is_classifier
from sklearn.svm import OneClassSVM
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.linear_model import _stochastic_gradient
from sklearn.model_selection import RandomizedSearchCV
def _update_kwargs(kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
class _SparseSGDClassifier(linear_model.SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super().decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super().predict_proba(X)
class _SparseSGDRegressor(linear_model.SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
# XXX untested as of v0.22
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.decision_function(self, X, *args, **kw)
class _SparseSGDOneClassSVM(linear_model.SGDOneClassSVM):
def fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.fit(self, X, *args, **kw)
def partial_fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.partial_fit(self, X, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.decision_function(self, X, *args, **kw)
def SGDClassifier(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDRegressor(**kwargs)
def SGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDOneClassSVM(**kwargs)
def SparseSGDClassifier(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDRegressor(**kwargs)
def SparseSGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDOneClassSVM(**kwargs)
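# Illustrative usage sketch of the factories above (defaults injected by
# _update_kwargs; not part of the tests):
#     clf = SGDClassifier()      # random_state=42, tol=None, max_iter=5
#     clf.fit(X, Y)
#     clf.predict(T)
# The Sparse* variants behave the same but convert their inputs to CSR
# matrices before delegating to the dense estimators.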
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array(
[
[-1, 1],
[-0.75, 0.5],
[-1.5, 1.5],
[1, 1],
[0.75, 0.5],
[1.5, 1.5],
[-1, -1],
[0, -0.5],
[1, -1],
]
)
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array(
[
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
]
)
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array(
[
[1, 0.9, 0.8, 0, 0, 0],
[1, 0.84, 0.98, 0, 0, 0],
[1, 0.96, 0.88, 0, 0, 0],
[1, 0.91, 0.99, 0, 0, 0],
[0, 0, 0, 0.89, 0.91, 1],
[0, 0, 0, 0.79, 0.84, 1],
[0, 0, 0, 0.91, 0.95, 1],
[0, 0, 0, 0.93, 1, 1],
]
)
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common test cases for classification and regression
# A simple implementation of ASGD to use for testing;
# it uses squared loss to find the gradient.
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = 0.01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
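# Illustrative sketch (not part of the original test suite; the helper name and
# hyper-parameters below are assumptions for demonstration only): the reference
# implementation above is meant to be compared against estimators fitted with
# ``average=True``, here reusing the module-level toy data ``X``/``Y``. The
# parametrized tests further down perform this comparison with tight tolerances.
def _asgd_reference_sketch(eta=0.001, alpha=0.01):
    y_signed = np.where(np.array(Y) == 2, 1.0, -1.0)
    est = SGDRegressor(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    est.fit(X, y_signed)
    ref_coef, ref_intercept = asgd(SGDRegressor, X, y_signed, eta, alpha)
    # est.coef_ is expected to track ref_coef and est.intercept_ to track
    # ref_intercept; returning both pairs makes the comparison explicit.
    return est.coef_, ref_coef, est.intercept_, ref_intercept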
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
@pytest.mark.parametrize("fit_method", ["fit", "partial_fit"])
@pytest.mark.parametrize(
"params, err_msg",
[
({"alpha": -0.1}, "alpha must be >= 0"),
({"penalty": "foobar", "l1_ratio": 0.85}, "Penalty foobar is not supported"),
({"loss": "foobar"}, "The loss foobar is not supported"),
({"l1_ratio": 1.1}, r"l1_ratio must be in \[0, 1\]"),
({"learning_rate": "<unknown>"}, "learning rate <unknown> is not supported"),
({"nu": -0.5}, r"nu must be in \(0, 1]"),
({"nu": 2}, r"nu must be in \(0, 1]"),
({"alpha": 0, "learning_rate": "optimal"}, "alpha must be > 0"),
({"eta0": 0, "learning_rate": "constant"}, "eta0 must be > 0"),
({"max_iter": -1}, "max_iter must be > zero"),
({"shuffle": "false"}, "shuffle must be either True or False"),
({"early_stopping": "false"}, "early_stopping must be either True or False"),
(
{"validation_fraction": -0.1},
r"validation_fraction must be in range \(0, 1\)",
),
({"n_iter_no_change": 0}, "n_iter_no_change must be >= 1"),
],
# Avoid long error messages in test names:
# https://github.com/scikit-learn/scikit-learn/issues/21362
ids=lambda x: x[:10].replace("]", "") if isinstance(x, str) else x,
)
def test_sgd_estimator_params_validation(klass, fit_method, params, err_msg):
"""Validate parameters in the different SGD estimators."""
try:
sgd_estimator = klass(**params)
except TypeError as err:
if "unexpected keyword argument" in str(err):
# skip test if the parameter is not supported by the estimator
return
raise err
with pytest.raises(ValueError, match=err_msg):
if is_classifier(sgd_estimator) and fit_method == "partial_fit":
fit_params = {"classes": np.unique(Y)}
else:
fit_params = {}
getattr(sgd_estimator, fit_method)(X, Y, **fit_params)
def _test_warm_start(klass, X, Y, lr):
# Test that explicit warm restart...
clf = klass(alpha=0.01, eta0=0.01, shuffle=False, learning_rate=lr)
clf.fit(X, Y)
clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False, learning_rate=lr)
clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(
alpha=0.01, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr
)
clf3.fit(X, Y)
assert clf3.t_ == clf.t_
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert clf3.t_ == clf2.t_
assert_array_almost_equal(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
_test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_input_format(klass):
# Input format tests.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
with pytest.raises(ValueError):
clf.fit(X, Y_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_clone(klass):
# Test whether clone works ok.
clf = klass(alpha=0.01, penalty="l1")
clf = clone(clf)
clf.set_params(penalty="l2")
clf.fit(X, Y)
clf2 = klass(alpha=0.01, penalty="l2")
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_plain_has_no_average_attr(klass):
clf = klass(average=True, eta0=0.01)
clf.fit(X, Y)
assert hasattr(clf, "_average_coef")
assert hasattr(clf, "_average_intercept")
assert hasattr(clf, "_standard_intercept")
assert hasattr(clf, "_standard_coef")
clf = klass()
clf.fit(X, Y)
assert not hasattr(clf, "_average_coef")
assert not hasattr(clf, "_average_intercept")
assert not hasattr(clf, "_standard_intercept")
assert not hasattr(clf, "_standard_coef")
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_late_onset_averaging_not_reached(klass):
clf1 = klass(average=600)
clf2 = klass()
for _ in range(100):
if is_classifier(clf1):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]:
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
assert_allclose(clf1.offset_, clf2.offset_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_late_onset_averaging_reached(klass):
eta0 = 0.001
alpha = 0.0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = klass(
average=7,
learning_rate="constant",
loss="squared_error",
eta0=eta0,
alpha=alpha,
max_iter=2,
shuffle=False,
)
clf2 = klass(
average=0,
learning_rate="constant",
loss="squared_error",
eta0=eta0,
alpha=alpha,
max_iter=1,
shuffle=False,
)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = asgd(
klass,
X,
Y_encode,
eta0,
alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_,
)
assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_early_stopping(klass):
X = iris.data[iris.target > 0]
Y = iris.target[iris.target > 0]
for early_stopping in [True, False]:
max_iter = 1000
clf = klass(early_stopping=early_stopping, tol=1e-3, max_iter=max_iter).fit(
X, Y
)
assert clf.n_iter_ < max_iter
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_adaptive_longer_than_constant(klass):
clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3, max_iter=100)
clf1.fit(iris.data, iris.target)
clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3, max_iter=100)
clf2.fit(iris.data, iris.target)
assert clf1.n_iter_ > clf2.n_iter_
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_validation_set_not_used_for_training(klass):
X, Y = iris.data, iris.target
validation_fraction = 0.4
seed = 42
shuffle = False
max_iter = 10
clf1 = klass(
early_stopping=True,
random_state=np.random.RandomState(seed),
validation_fraction=validation_fraction,
learning_rate="constant",
eta0=0.01,
tol=None,
max_iter=max_iter,
shuffle=shuffle,
)
clf1.fit(X, Y)
assert clf1.n_iter_ == max_iter
clf2 = klass(
early_stopping=False,
random_state=np.random.RandomState(seed),
learning_rate="constant",
eta0=0.01,
tol=None,
max_iter=max_iter,
shuffle=shuffle,
)
if is_classifier(clf2):
cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=seed)
else:
cv = ShuffleSplit(test_size=validation_fraction, random_state=seed)
idx_train, idx_val = next(cv.split(X, Y))
idx_train = np.sort(idx_train) # remove shuffling
clf2.fit(X[idx_train], Y[idx_train])
assert clf2.n_iter_ == max_iter
assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_n_iter_no_change(klass):
X, Y = iris.data, iris.target
# test that n_iter_ increases monotonically with n_iter_no_change
for early_stopping in [True, False]:
n_iter_list = [
klass(
early_stopping=early_stopping,
n_iter_no_change=n_iter_no_change,
tol=1e-4,
max_iter=1000,
)
.fit(X, Y)
.n_iter_
for n_iter_no_change in [2, 3, 10]
]
assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_not_enough_sample_for_early_stopping(klass):
# test an error is raised if the training or validation set is empty
clf = klass(early_stopping=True, validation_fraction=0.99)
with pytest.raises(ValueError):
clf.fit(X3, Y3)
###############################################################################
# Classification Test Case
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log_loss", "modified_huber"):
clf = klass(
penalty="l2",
alpha=0.01,
fit_intercept=True,
loss=loss,
max_iter=10,
shuffle=True,
)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_provide_coef(klass):
"""Check that the shape of `coef_init` is validated."""
with pytest.raises(ValueError, match="Provided coef_init does not match dataset"):
klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize(
"klass, fit_params",
[
(SGDClassifier, {"intercept_init": np.zeros((3,))}),
(SparseSGDClassifier, {"intercept_init": np.zeros((3,))}),
(SGDOneClassSVM, {"offset_init": np.zeros((3,))}),
(SparseSGDOneClassSVM, {"offset_init": np.zeros((3,))}),
],
)
def test_set_intercept_offset(klass, fit_params):
"""Check that `intercept_init` or `offset_init` is validated."""
sgd_estimator = klass()
with pytest.raises(ValueError, match="does not match dataset"):
sgd_estimator.fit(X, Y, **fit_params)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_early_stopping_with_partial_fit(klass):
"""Check that we raise an error for `early_stopping` used with
`partial_fit`.
"""
err_msg = "early_stopping should be False with partial_fit"
with pytest.raises(ValueError, match=err_msg):
klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize(
"klass, fit_params",
[
(SGDClassifier, {"intercept_init": 0}),
(SparseSGDClassifier, {"intercept_init": 0}),
(SGDOneClassSVM, {"offset_init": 0}),
(SparseSGDOneClassSVM, {"offset_init": 0}),
],
)
def test_set_intercept_offset_binary(klass, fit_params):
"""Check that we can pass a scaler with binary classification to
`intercept_init` or `offset_init`."""
klass().fit(X5, Y5, **fit_params)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
# Checks the SGDClassifier correctly computes the average weights
eta = 0.1
alpha = 2.0
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_, average_weights, decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = klass().fit(X5, Y5)
klass().fit(X5, Y5, intercept_init=clf.intercept_)
clf = klass().fit(X, Y)
klass().fit(X, Y, intercept_init=clf.intercept_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
# Target must have at least two labels
clf = klass(alpha=0.01, max_iter=20)
with pytest.raises(ValueError):
clf.fit(X2, np.ones(9))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
    # partial_fit with class_weight='balanced' is not supported
regex = (
r"class_weight 'balanced' is not supported for "
r"partial_fit\. In order to use 'balanced' weights, "
r"use compute_class_weight\('balanced', classes=classes, y=y\). "
r"In place of y you can us a large enough sample "
r"of the full training set target to properly "
r"estimate the class frequency distributions\. "
r"Pass the resulting weights as the class_weight "
r"parameter\."
)
with pytest.raises(ValueError, match=regex):
klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
eta = 0.001
alpha = 0.01
# Multi-class average test case
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3))
assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
# Multi-class test case with multi-core support
clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
# TODO: Remove filterwarnings in v1.2.
@pytest.mark.filterwarnings("ignore:.*squared_loss.*:FutureWarning")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
# Checks that SGDClassifier predict_proba and predict_log_proba methods
# can either be accessed or raise an appropriate error message
# otherwise. See
# https://github.com/scikit-learn/scikit-learn/issues/10938 for more
# details.
for loss in linear_model.SGDClassifier.loss_functions:
clf = SGDClassifier(loss=loss)
# TODO(1.3): Remove "log"
if loss in ("log_loss", "log", "modified_huber"):
assert hasattr(clf, "predict_proba")
assert hasattr(clf, "predict_log_proba")
else:
message = "probability estimates are not available for loss={!r}".format(
loss
)
assert not hasattr(clf, "predict_proba")
assert not hasattr(clf, "predict_log_proba")
with pytest.raises(AttributeError, match=message):
clf.predict_proba
with pytest.raises(AttributeError, match=message):
clf.predict_log_proba
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=10, tol=None).fit(X, Y)
assert not hasattr(clf, "predict_proba")
assert not hasattr(clf, "predict_log_proba")
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log_loss", "modified_huber"]:
clf = klass(loss=loss, alpha=0.01, max_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert p[0, 1] > 0.5
p = clf.predict_proba([[-1, -1]])
assert p[0, 1] < 0.5
p = clf.predict_log_proba([[3, 2]])
assert p[0, 1] > p[0, 0]
p = clf.predict_log_proba([[-1, -1]])
assert p[0, 1] < p[0, 0]
# log loss multiclass probability estimates
clf = klass(loss="log_loss", alpha=0.01, max_iter=10).fit(X2, Y2)
d = clf.decision_function([[0.1, -0.1], [0.3, 0.2]])
p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert np.all(p[0] >= 0)
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
lp = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), lp)
lp = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), lp)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if klass != SparseSGDClassifier:
assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
else: # XXX the sparse test gets a different X2 (?)
assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.0] * 3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = klass(
penalty="l1",
alpha=0.2,
fit_intercept=False,
max_iter=2000,
tol=None,
shuffle=False,
)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
# ValueError due to not existing class label.
clf = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_format(klass):
# ValueError due to wrong class_weight argument type.
clf = klass(alpha=0.1, max_iter=1000, class_weight=[0.5])
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: 0.6, 2: 0.3}
rng = np.random.RandomState(0)
sample_weights = rng.random_sample(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
clf2 = klass(alpha=0.1, max_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = klass(alpha=0.0001, max_iter=1000, class_weight=None, shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf.predict(X), average="weighted")
assert_almost_equal(f1, 0.96, decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = klass(
alpha=0.0001, max_iter=1000, class_weight="balanced", shuffle=False
).fit(X, y)
f1 = metrics.f1_score(y, clf_balanced.predict(X), average="weighted")
assert_almost_equal(f1, 0.96, decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = klass(max_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average="weighted") < 0.96
# fit a model with balanced class_weight enabled
clf = klass(max_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average="weighted") > 0.96
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_wrong_sample_weights(klass):
# Test if ValueError is raised if sample_weight has wrong shape
if klass in [SGDClassifier, SparseSGDClassifier]:
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
clf = klass(nu=0.1, max_iter=1000, fit_intercept=False)
# provided sample_weight too long
with pytest.raises(ValueError):
clf.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
clf = klass(alpha=0.01)
# classes was not specified
with pytest.raises(ValueError):
clf.partial_fit(X3, Y3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert clf.coef_.shape == (1, X.shape[1])
assert clf.intercept_.shape == (1,)
assert clf.decision_function([[0, 0]]).shape == (1,)
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated
assert id1, id2
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated
assert id1, id2
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
clf.partial_fit(X2[third:], Y2[third:])
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_fit_then_partial_fit(klass):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = klass()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_classif(klass, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = klass(alpha=0.01, eta0=0.01, max_iter=2, learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert clf.t_ == t
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_regression_losses(klass):
random_state = np.random.RandomState(1)
clf = klass(
alpha=0.01,
learning_rate="constant",
eta0=0.1,
loss="epsilon_insensitive",
random_state=random_state,
)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(
alpha=0.01,
learning_rate="constant",
eta0=0.1,
loss="squared_epsilon_insensitive",
random_state=random_state,
)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(alpha=0.01, loss="huber", random_state=random_state)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(
alpha=0.01,
learning_rate="constant",
eta0=0.01,
loss="squared_error",
random_state=random_state,
)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_warm_start_multiclass(klass):
_test_warm_start(klass, X2, Y2, "optimal")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_multiple_fit(klass):
# Test multiple calls of fit w/ different shaped inputs.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
assert hasattr(clf, "coef_")
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
###############################################################################
# Regression Test Case
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_reg(klass):
# Check that SGD gives any results.
clf = klass(alpha=0.1, max_iter=2, fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert clf.coef_[0] == clf.coef_[1]
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
# Tests the average regressor matches the naive implementation
eta = 0.001
alpha = 0.01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_partial_fit(klass):
# Tests whether the partial fit yields the same average as the fit
eta = 0.001
alpha = 0.01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.partial_fit(X[: int(n_samples / 2)][:], y[: int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2) :][:], y[int(n_samples / 2) :])
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_average_sparse(klass):
# Checks the average weights on data with 0s
eta = 0.001
alpha = 0.01
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
n_samples = Y3.shape[0]
clf.partial_fit(X3[: int(n_samples / 2)][:], Y3[: int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2) :][:], Y3[int(n_samples / 2) :])
average_weights, average_intercept = asgd(klass, X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_least_squares_fit(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_epsilon_insensitive(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(
loss="epsilon_insensitive",
epsilon=0.01,
alpha=0.1,
max_iter=20,
fit_intercept=False,
)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(
loss="epsilon_insensitive",
epsilon=0.01,
alpha=0.1,
max_iter=20,
fit_intercept=False,
)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_huber_fit(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_elasticnet_convergence(klass):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
    # ground-truth linear model that generates y from X and to which the
    # models should converge if the regularization were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(
alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False
)
cd.fit(X, y)
sgd = klass(
penalty="elasticnet",
max_iter=50,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=False,
)
sgd.fit(X, y)
err_msg = (
"cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)
)
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg)
@ignore_warnings
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_partial_fit(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert clf.coef_.shape == (X.shape[1],)
assert clf.intercept_.shape == (1,)
assert clf.predict([[0, 0]]).shape == (1,)
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated
assert id1, id2
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit(klass, lr):
clf = klass(alpha=0.01, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert clf.t_ == t
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_loss_function_epsilon(klass):
clf = klass(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions["huber"][1] == 0.1
###############################################################################
# SGD One Class SVM Test Case
# a simple implementation of ASGD to use for testing SGDOneClassSVM
def asgd_oneclass(klass, X, eta, nu, coef_init=None, offset_init=0.0):
if coef_init is None:
coef = np.zeros(X.shape[1])
else:
coef = coef_init
average_coef = np.zeros(X.shape[1])
offset = offset_init
intercept = 1 - offset
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass == SparseSGDOneClassSVM:
decay = 0.01
for i, entry in enumerate(X):
p = np.dot(entry, coef)
p += intercept
if p <= 1.0:
gradient = -1
else:
gradient = 0
coef *= max(0, 1.0 - (eta * nu / 2))
coef += -(eta * gradient * entry)
intercept += -(eta * (nu + gradient)) * decay
average_coef *= i
average_coef += coef
average_coef /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_coef, 1 - average_intercept
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def _test_warm_start_oneclass(klass, X, lr):
# Test that explicit warm restart...
clf = klass(nu=0.5, eta0=0.01, shuffle=False, learning_rate=lr)
clf.fit(X)
clf2 = klass(nu=0.1, eta0=0.01, shuffle=False, learning_rate=lr)
clf2.fit(X, coef_init=clf.coef_.copy(), offset_init=clf.offset_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(nu=0.5, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr)
clf3.fit(X)
assert clf3.t_ == clf.t_
assert_allclose(clf3.coef_, clf.coef_)
clf3.set_params(nu=0.1)
clf3.fit(X)
assert clf3.t_ == clf2.t_
assert_allclose(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start_oneclass(klass, lr):
_test_warm_start_oneclass(klass, X, lr)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_clone_oneclass(klass):
# Test whether clone works ok.
clf = klass(nu=0.5)
clf = clone(clf)
clf.set_params(nu=0.1)
clf.fit(X)
clf2 = klass(nu=0.1)
clf2.fit(X)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_partial_fit_oneclass(klass):
third = X.shape[0] // 3
clf = klass(nu=0.1)
clf.partial_fit(X[:third])
assert clf.coef_.shape == (X.shape[1],)
assert clf.offset_.shape == (1,)
assert clf.predict([[0, 0]]).shape == (1,)
previous_coefs = clf.coef_
clf.partial_fit(X[third:])
    # check that coef_ hasn't been re-allocated
assert clf.coef_ is previous_coefs
# raises ValueError if number of features does not match previous data
with pytest.raises(ValueError):
clf.partial_fit(X[:, 1])
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_oneclass(klass, lr):
clf = klass(nu=0.05, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
clf.fit(X)
y_scores = clf.decision_function(T)
t = clf.t_
coef = clf.coef_
offset = clf.offset_
clf = klass(nu=0.05, eta0=0.01, max_iter=1, learning_rate=lr, shuffle=False)
for _ in range(2):
clf.partial_fit(X)
y_scores2 = clf.decision_function(T)
assert clf.t_ == t
assert_allclose(y_scores, y_scores2)
assert_allclose(clf.coef_, coef)
assert_allclose(clf.offset_, offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_late_onset_averaging_reached_oneclass(klass):
# Test average
eta0 = 0.001
nu = 0.05
# 2 passes over the training set but average only at second pass
clf1 = klass(
average=7, learning_rate="constant", eta0=eta0, nu=nu, max_iter=2, shuffle=False
)
# 1 pass over the training set with no averaging
clf2 = klass(
average=0, learning_rate="constant", eta0=eta0, nu=nu, max_iter=1, shuffle=False
)
clf1.fit(X)
clf2.fit(X)
# Start from clf2 solution, compute averaging using asgd function and
# compare with clf1 solution
average_coef, average_offset = asgd_oneclass(
klass, X, eta0, nu, coef_init=clf2.coef_.ravel(), offset_init=clf2.offset_
)
assert_allclose(clf1.coef_.ravel(), average_coef.ravel())
assert_allclose(clf1.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
# Tests the average SGD One-Class SVM matches the naive implementation
eta = 0.001
nu = 0.05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(
learning_rate="constant",
eta0=eta,
nu=nu,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.fit(X)
average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_partial_fit_oneclass(klass):
# Tests whether the partial fit yields the same average as the fit
eta = 0.001
nu = 0.05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(
learning_rate="constant",
eta0=eta,
nu=nu,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.partial_fit(X[: int(n_samples / 2)][:])
clf.partial_fit(X[int(n_samples / 2) :][:])
average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_average_sparse_oneclass(klass):
# Checks the average coef on data with 0s
eta = 0.001
nu = 0.01
clf = klass(
learning_rate="constant",
eta0=eta,
nu=nu,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
n_samples = X3.shape[0]
clf.partial_fit(X3[: int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2) :])
average_coef, average_offset = asgd_oneclass(klass, X3, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
def test_sgd_oneclass():
# Test fit, decision_function, predict and score_samples on a toy
# dataset
X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
X_test = np.array([[0.5, -2], [2, 2]])
clf = SGDOneClassSVM(
nu=0.5, eta0=1, learning_rate="constant", shuffle=False, max_iter=1
)
clf.fit(X_train)
assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
assert clf.offset_[0] == -0.5
scores = clf.score_samples(X_test)
assert_allclose(scores, np.array([-0.9375, 0.625]))
dec = clf.score_samples(X_test) - clf.offset_
assert_allclose(clf.decision_function(X_test), dec)
pred = clf.predict(X_test)
assert_array_equal(pred, np.array([-1, 1]))
def test_ocsvm_vs_sgdocsvm():
    # Checks that SGDOneClassSVM gives a good approximation of a kernelized
    # One-Class SVM
nu = 0.05
gamma = 2.0
random_state = 42
# Generate train and test data
rng = np.random.RandomState(random_state)
X = 0.3 * rng.randn(500, 2)
X_train = np.r_[X + 2, X - 2]
X = 0.3 * rng.randn(100, 2)
X_test = np.r_[X + 2, X - 2]
# One-Class SVM
clf = OneClassSVM(gamma=gamma, kernel="rbf", nu=nu)
clf.fit(X_train)
y_pred_ocsvm = clf.predict(X_test)
dec_ocsvm = clf.decision_function(X_test).reshape(1, -1)
# SGDOneClassSVM using kernel approximation
max_iter = 15
transform = Nystroem(gamma=gamma, random_state=random_state)
clf_sgd = SGDOneClassSVM(
nu=nu,
shuffle=True,
fit_intercept=True,
max_iter=max_iter,
random_state=random_state,
tol=-np.inf,
)
pipe_sgd = make_pipeline(transform, clf_sgd)
pipe_sgd.fit(X_train)
y_pred_sgdocsvm = pipe_sgd.predict(X_test)
dec_sgdocsvm = pipe_sgd.decision_function(X_test).reshape(1, -1)
assert np.mean(y_pred_sgdocsvm == y_pred_ocsvm) >= 0.99
corrcoef = np.corrcoef(np.concatenate((dec_ocsvm, dec_sgdocsvm)))[0, 1]
assert corrcoef >= 0.9
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(
n_samples=1000, n_features=100, n_informative=20, random_state=1234
)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(
alpha=0.001,
penalty="elasticnet",
tol=None,
max_iter=6,
l1_ratio=0.9999999999,
random_state=42,
).fit(X, y)
est_l1 = SGDClassifier(
alpha=0.001, penalty="l1", max_iter=6, random_state=42, tol=None
).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(
alpha=0.001,
penalty="elasticnet",
tol=None,
max_iter=6,
l1_ratio=0.0000000001,
random_state=42,
).fit(X, y)
est_l2 = SGDClassifier(
alpha=0.001, penalty="l2", max_iter=6, random_state=42, tol=None
).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all="raise"):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert np.isfinite(X).all()
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert np.isfinite(X_scaled).all()
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.0).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss="squared_hinge", max_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert np.isfinite(model.coef_).all()
# model is numerically unstable on unscaled data
msg_regxp = (
r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help."
)
with pytest.raises(ValueError, match=msg_regxp):
model.fit(X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(
loss="squared_hinge",
max_iter=10,
shuffle=True,
penalty="elasticnet",
l1_ratio=0.3,
alpha=0.01,
eta0=0.001,
random_state=0,
tol=None,
)
with np.errstate(all="raise"):
model.fit(iris.data, iris.target)
assert np.isfinite(model.coef_).all()
@pytest.mark.parametrize("penalty", ["l2", "l1", "elasticnet"])
def test_large_regularization(penalty):
# Non regression tests for numerical stability issues caused by large
# regularization parameters
model = SGDClassifier(
alpha=1e5,
learning_rate="constant",
eta0=0.1,
penalty=penalty,
shuffle=False,
tol=None,
max_iter=6,
)
with np.errstate(all="raise"):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
# Test that the tol parameter behaves as expected
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
    # When tol is None, the number of iterations should be equal to max_iter
max_iter = 42
model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter)
model_0.fit(X, y)
assert max_iter == model_0.n_iter_
    # If tol is not None, the number of iterations should be less than max_iter
max_iter = 2000
model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
model_1.fit(X, y)
assert max_iter > model_1.n_iter_
assert model_1.n_iter_ > 5
    # A larger tol should yield a smaller number of iterations
model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
model_2.fit(X, y)
assert model_1.n_iter_ > model_2.n_iter_
assert model_2.n_iter_ > 3
# Strict tolerance and small max_iter should trigger a warning
model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
warning_message = (
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
model_3.fit(X, y)
assert model_3.n_iter_ == 3
def _test_loss_common(loss_function, cases):
    # Test the different loss functions
    # cases is a list of (p, y, expected_loss, expected_dloss)
for p, y, expected_loss, expected_dloss in cases:
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
def test_loss_hinge():
# Test Hinge (hinge / perceptron)
# hinge
loss = sgd_fast.Hinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.1, 1.0, 0.0, 0.0),
(-2.0, -1.0, 0.0, 0.0),
(1.0, 1.0, 0.0, -1.0),
(-1.0, -1.0, 0.0, 1.0),
(0.5, 1.0, 0.5, -1.0),
(2.0, -1.0, 3.0, 1.0),
(-0.5, -1.0, 0.5, 1.0),
(0.0, 1.0, 1, -1.0),
]
_test_loss_common(loss, cases)
# perceptron
loss = sgd_fast.Hinge(0.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-0.1, -1.0, 0.0, 0.0),
(0.0, 1.0, 0.0, -1.0),
(0.0, -1.0, 0.0, 1.0),
(0.5, -1.0, 0.5, 1.0),
(2.0, -1.0, 2.0, 1.0),
(-0.5, 1.0, 0.5, -1.0),
(-1.0, 1.0, 1.0, -1.0),
]
_test_loss_common(loss, cases)
def test_gradient_squared_hinge():
# Test SquaredHinge
loss = sgd_fast.SquaredHinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-2.0, -1.0, 0.0, 0.0),
(1.0, -1.0, 4.0, 4.0),
(-1.0, 1.0, 4.0, -4.0),
(0.5, 1.0, 0.25, -1.0),
(0.5, -1.0, 2.25, 3.0),
]
_test_loss_common(loss, cases)
def test_loss_log():
# Test Log (logistic loss)
loss = sgd_fast.Log()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)),
(1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)),
(-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)),
(-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)),
(0.0, 1.0, np.log(2), -0.5),
(0.0, -1.0, np.log(2), 0.5),
(17.9, -1.0, 17.9, 1.0),
(-17.9, 1.0, 17.9, -1.0),
]
_test_loss_common(loss, cases)
assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16)
assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16)
def test_loss_squared_loss():
# Test SquaredLoss
loss = sgd_fast.SquaredLoss()
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(1.0, 1.0, 0.0, 0.0),
(1.0, 0.0, 0.5, 1.0),
(0.5, -1.0, 1.125, 1.5),
(-2.5, 2.0, 10.125, -4.5),
]
_test_loss_common(loss, cases)
def test_loss_huber():
# Test Huber
loss = sgd_fast.Huber(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.005, 0.1),
(0.0, 0.1, 0.005, -0.1),
(3.95, 4.0, 0.00125, -0.05),
(5.0, 2.0, 0.295, 0.1),
(-1.0, 5.0, 0.595, -0.1),
]
_test_loss_common(loss, cases)
def test_loss_modified_huber():
    # Test ModifiedHuber
loss = sgd_fast.ModifiedHuber()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-1.0, -1.0, 0.0, 0.0),
(2.0, 1.0, 0.0, 0.0),
(0.0, 1.0, 1.0, -2.0),
(-1.0, 1.0, 4.0, -4.0),
(0.5, -1.0, 2.25, 3.0),
(-2.0, 1.0, 8, -4.0),
(-3.0, 1.0, 12, -4.0),
]
_test_loss_common(loss, cases)
def test_loss_epsilon_insensitive():
# Test EpsilonInsensitive
loss = sgd_fast.EpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.0, 0.0),
(-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0),
(2.2, 2.0, 0.1, 1.0),
(2.0, -1.0, 2.9, 1.0),
(2.0, 2.2, 0.1, -1.0),
(-2.0, 1.0, 2.9, -1.0),
]
_test_loss_common(loss, cases)
def test_loss_squared_epsilon_insensitive():
# Test SquaredEpsilonInsensitive
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.0, 0.0),
(-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0),
(2.2, 2.0, 0.01, 0.2),
(2.0, -1.0, 8.41, 5.8),
(2.0, 2.2, 0.01, -0.2),
(-2.0, 1.0, 8.41, -5.8),
]
_test_loss_common(loss, cases)
def test_multi_thread_multi_class_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and thread-based parallelism.
clf = SGDClassifier(
alpha=1e-3,
tol=1e-3,
max_iter=1000,
early_stopping=True,
n_iter_no_change=100,
random_state=0,
n_jobs=2,
)
clf.fit(iris.data, iris.target)
assert clf.n_iter_ > clf.n_iter_no_change
assert clf.n_iter_ < clf.n_iter_no_change + 20
assert clf.score(iris.data, iris.target) > 0.8
def test_multi_core_gridsearch_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and process-based multi-core
# parallelism.
param_grid = {
"alpha": np.logspace(-4, 4, 9),
"n_iter_no_change": [5, 10, 50],
}
clf = SGDClassifier(tol=1e-2, max_iter=1000, early_stopping=True, random_state=0)
search = RandomizedSearchCV(clf, param_grid, n_iter=5, n_jobs=2, random_state=0)
search.fit(iris.data, iris.target)
assert search.best_score_ > 0.8
@pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"])
def test_SGDClassifier_fit_for_all_backends(backend):
# This is a non-regression smoke test. In the multi-class case,
# SGDClassifier.fit fits each class in a one-versus-all fashion using
# joblib.Parallel. However, each OvA step updates the coef_ attribute of
# the estimator in-place. Internally, SGDClassifier calls Parallel using
# require='sharedmem'. This test makes sure SGDClassifier.fit works
# consistently even when the user asks for a backend that does not provide
# sharedmem semantics.
# We further test a case where memmapping would have been used if
# SGDClassifier.fit was called from a loky or multiprocessing backend. In
# this specific case, in-place modification of clf.coef_ would have caused
# a segmentation fault when trying to write in a readonly memory mapped
# buffer.
random_state = np.random.RandomState(42)
    # Create a sparse classification problem with 2000 features and 20 classes.
    # With the loky or multiprocessing backends, a large enough clf.coef_ would
    # exceed the threshold above which memmapping is used in joblib and loky
    # (1MB as of 2018/11/1).
X = sp.random(500, 2000, density=0.02, format="csr", random_state=random_state)
y = random_state.choice(20, 500)
# Begin by fitting a SGD classifier sequentially
clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1, random_state=42)
clf_sequential.fit(X, y)
# Fit a SGDClassifier using the specified backend, and make sure the
# coefficients are equal to those obtained using a sequential fit
clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4, random_state=42)
with joblib.parallel_backend(backend=backend):
clf_parallel.fit(X, y)
assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_)
@pytest.mark.parametrize(
"old_loss, new_loss, Estimator",
[
# TODO(1.2): Remove "squared_loss"
("squared_loss", "squared_error", linear_model.SGDClassifier),
("squared_loss", "squared_error", linear_model.SGDRegressor),
# TODO(1.3): Remove "log"
("log", "log_loss", linear_model.SGDClassifier),
],
)
def test_loss_deprecated(old_loss, new_loss, Estimator):
# Note: class BaseSGD calls self._validate_params() in __init__, therefore
# even instantiation of class raises FutureWarning for deprecated losses.
with pytest.warns(FutureWarning, match=f"The loss '{old_loss}' was deprecated"):
est1 = Estimator(loss=old_loss, random_state=0)
est1.fit(X, Y)
est2 = Estimator(loss=new_loss, random_state=0)
est2.fit(X, Y)
if hasattr(est1, "predict_proba"):
assert_allclose(est1.predict_proba(X), est2.predict_proba(X))
else:
assert_allclose(est1.predict(X), est2.predict(X))
@pytest.mark.parametrize(
"Estimator", [linear_model.SGDClassifier, linear_model.SGDRegressor]
)
def test_sgd_random_state(Estimator, global_random_seed):
# Train the same model on the same data without converging and check that we
# get reproducible results by fixing the random seed.
if Estimator == linear_model.SGDRegressor:
X, y = datasets.make_regression(random_state=global_random_seed)
else:
X, y = datasets.make_classification(random_state=global_random_seed)
# Fitting twice a model with the same hyper-parameters on the same training
# set with the same seed leads to the same results deterministically.
est = Estimator(random_state=global_random_seed, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_same_seed_a = est.fit(X, y).coef_
assert est.n_iter_ == 1
est = Estimator(random_state=global_random_seed, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_same_seed_b = est.fit(X, y).coef_
assert est.n_iter_ == 1
assert_allclose(coef_same_seed_a, coef_same_seed_b)
# Fitting twice a model with the same hyper-parameters on the same training
# set but with different random seed leads to different results after one
# epoch because of the random shuffling of the dataset.
est = Estimator(random_state=global_random_seed + 1, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_other_seed = est.fit(X, y).coef_
assert est.n_iter_ == 1
assert np.abs(coef_same_seed_a - coef_other_seed).max() > 1.0
def test_validation_mask_correctly_subsets(monkeypatch):
"""Test that data passed to validation callback correctly subsets.
Non-regression test for #23255.
"""
X, Y = iris.data, iris.target
n_samples = X.shape[0]
validation_fraction = 0.2
clf = linear_model.SGDClassifier(
early_stopping=True,
tol=1e-3,
max_iter=1000,
validation_fraction=validation_fraction,
)
mock = Mock(side_effect=_stochastic_gradient._ValidationScoreCallback)
monkeypatch.setattr(_stochastic_gradient, "_ValidationScoreCallback", mock)
clf.fit(X, Y)
X_val, y_val = mock.call_args[0][1:3]
assert X_val.shape[0] == int(n_samples * validation_fraction)
assert y_val.shape[0] == int(n_samples * validation_fraction)
|
the-stack_0_9743 | import csv
import ctypes
import datetime
from easydict import EasyDict
import logging
import multiprocessing as mp
import os
import random
import sys
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import yaml
import GPUtil
import numpy as np
import psutil
import torch
import torch.backends.cudnn as cudnn
from torch import cuda
EVALKEYS = ["r1", "r5", "r10", "r50", "medr", "meanr", "sum"]
EVALHEADER = "Retriev | R@1 | R@5 | R@10 | R@50 | MeanR | MedR | Sum"
def create_dataloader_path(data_root,
shot_per_group,
dataset_name,
text_feature_name='default',
feature_name_modality_a='action',
feature_name_modality_b='flow', pickle_path=None):
"""create the path to meta file and features
#last modality will be modality_b
Args:
data_root ([PATH]): [Path to the data folder]
shot_per_group ([Int]): [number of shots (clips) per group (video)]
Returns:
[Dict]: [path to meta data and video/language features]
"""
meta_data_path = {}
video_feat_path = {}
if pickle_path is not None:
pickle_path = Path(pickle_path)
else:
pickle_path = ""
for mod_name in feature_name_modality_a:
meta_data_path[mod_name] = Path(
os.path.join(data_root, "meta",
"meta_group{}_{}.json".format(shot_per_group, mod_name)))
video_feat_path[mod_name] = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"video_features", "{}.h5".format(mod_name)))
#If modality B is "text" then we already have it in language feats
if feature_name_modality_b != "text":
meta_data_path[feature_name_modality_b] = Path(
os.path.join(data_root, "meta",
"meta_group{}_{}.json".format(shot_per_group, feature_name_modality_b)))
video_feat_path[feature_name_modality_b] = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"video_features", "{}.h5".format(feature_name_modality_b)))
language_feat_path = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"language_features",
"text_{}.h5".format(text_feature_name)))
meta_text_len_path = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"language_features",
"text_lens_{}.json".format(text_feature_name)))
return {
"meta_data": meta_data_path,
"video_feats": video_feat_path,
"language_feats": language_feat_path,
"meta_text_len": meta_text_len_path,
"dataset_name": dataset_name,
"pickle_path": pickle_path
}
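# Illustrative usage sketch (added for clarity, not part of the original
# module); the data root and feature names below are hypothetical:
#
#   paths = create_dataloader_path(
#       "/data/youcook2", shot_per_group=5, dataset_name="youcook2",
#       text_feature_name="default", feature_name_modality_a=["action"],
#       feature_name_modality_b="text")
#   paths["meta_data"]["action"]   # .../meta/meta_group5_action.json
#   paths["language_feats"]        # .../group5/language_features/text_default.h5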
def get_csv_header_keys(compute_clip_retrieval):
metric_keys = ["ep", "time"]
prefixes = ["v", "p"]
if compute_clip_retrieval:
prefixes += ["c", "s"]
for prefix in prefixes:
for key in EVALKEYS:
metric_keys.append(f"{prefix}-{key}")
return metric_keys
def print_csv_results(csv_file: str, cfg: EasyDict, print_fn=print):
metric_keys = get_csv_header_keys(True)
with Path(csv_file).open("rt", encoding="utf8") as fh:
reader = csv.DictReader(fh, metric_keys)
line_data = [line for line in reader][1:]
for line in line_data:
for key, val in line.items():
line[key] = float(val)
if cfg.val.det_best_field == "val_score_at_1":
relevant_field = [line["v-r1"] + line["p-r1"] for line in line_data]
elif cfg.val.det_best_field == "val_clip_score_at_1":
relevant_field = [line["c-r1"] + line["s-r1"] for line in line_data]
else:
raise NotImplementedError
best_epoch = np.argmax(relevant_field)
def get_res(search_key):
results = {}
for key_, val_ in line_data[best_epoch].items():
if key_[:2] == f"{search_key}-":
results[key_[2:]] = float(val_)
return results
print_fn(f"Total epochs {len(line_data)}. "
f"Results from best epoch {best_epoch}:")
print_fn(EVALHEADER)
print_fn(retrieval_results_to_str(get_res("p"), "Par2Vid"))
print_fn(retrieval_results_to_str(get_res("v"), "Vid2Par"))
print_fn(retrieval_results_to_str(get_res("s"), "Sen2Cli"))
print_fn(retrieval_results_to_str(get_res("c"), "Cli2Sen"))
def expand_segment(num_frames, num_target_frames, start_frame, stop_frame):
num_frames_seg = stop_frame - start_frame + 1
changes = False
if num_target_frames > num_frames:
num_target_frames = num_frames
if num_frames_seg < num_target_frames:
while True:
if start_frame > 0:
start_frame -= 1
num_frames_seg += 1
changes = True
if num_frames_seg == num_target_frames:
break
if stop_frame < num_frames - 1:
stop_frame += 1
num_frames_seg += 1
changes = True
if num_frames_seg == num_target_frames:
break
return start_frame, stop_frame, changes
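# Illustrative example (added for clarity, not part of the original module):
# expanding a 3-frame segment [10, 12] to 7 frames inside a 100-frame clip
# grows it on both sides while staying within the clip bounds:
#
#   expand_segment(num_frames=100, num_target_frames=7,
#                  start_frame=10, stop_frame=12)   # -> (8, 14, True)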
def set_seed(seed: int, set_deterministic: bool = True):
"""
Set all relevant seeds for torch, numpy and python
Args:
seed: int seed
set_deterministic: Guarantee deterministic training, possibly at the cost of performance.
"""
torch.manual_seed(seed)
cuda.manual_seed(seed)
cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
if set_deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
elif cudnn.benchmark or not cudnn.deterministic:
print("WARNING: Despite fixed seed {}, training may not be deterministic with {} "
"(must be False for deterministic training) and {} (must be True for deterministic "
"training)".format(seed, cudnn.benchmark, cudnn.deterministic))
def load_config(file: Union[str, Path]) -> EasyDict:
with Path(file).open("rt", encoding="utf8") as fh:
config = yaml.load(fh, Loader=yaml.Loader)
cfg = EasyDict(config)
# model symmetry
for check_network in ["text_pooler", "text_sequencer"]:
if getattr(cfg, check_network).name == "same":
setattr(cfg, check_network,
getattr(cfg,
getattr(cfg, check_network).same_as))
return cfg
def dump_config(cfg: EasyDict, file: Union[str, Path]) -> None:
with Path(file).open("wt", encoding="utf8") as fh:
yaml.dump(cfg, fh, Dumper=yaml.Dumper)
def print_config(cfg: EasyDict, level=0) -> None:
for key, val in cfg.items():
if isinstance(val, EasyDict):
print(" " * level, str(key), sep="")
print_config(val, level=level + 1)
else:
print(" " * level, f"{key} - f{val} ({type(val)})", sep="")
def make_shared_array(np_array: np.ndarray) -> mp.Array:
flat_shape = int(np.prod(np_array.shape))
shared_array_base = mp.Array(ctypes.c_float, flat_shape)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array = shared_array.reshape(np_array.shape)
shared_array[:] = np_array[:]
return shared_array
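# Illustrative usage sketch (added for clarity, not part of the original
# module): the returned array is backed by shared memory, so dataloader worker
# processes forked afterwards can read it without copying the buffer:
#
#   feats = np.zeros((1000, 512), dtype=np.float32)
#   shared_feats = make_shared_array(feats)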
def compute_indices(num_frames_orig: int, num_frames_target: int,
is_train: bool):
def round_half_down(array: np.ndarray) -> np.ndarray:
return np.ceil(array - 0.5)
if is_train:
# random sampling during training
start_points = np.linspace(0,
num_frames_orig,
num_frames_target,
endpoint=False)
start_points = round_half_down(start_points).astype(int)
offsets = start_points[1:] - start_points[:-1]
np.random.shuffle(offsets)
last_offset = num_frames_orig - np.sum(offsets)
offsets = np.concatenate([offsets, np.array([last_offset])])
new_start_points = np.cumsum(offsets) - offsets[0]
offsets = np.roll(offsets, -1)
random_offsets = offsets * np.random.rand(num_frames_target)
indices = new_start_points + random_offsets
indices = np.floor(indices).astype(int)
return indices
# center sampling during validation
start_points = np.linspace(0,
num_frames_orig,
num_frames_target,
endpoint=False)
offset = num_frames_orig / num_frames_target / 2
indices = start_points + offset
indices = np.floor(indices).astype(int)
return indices
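# Illustrative example (added for clarity, not part of the original module):
# subsampling 4 frames from a 12-frame clip. In validation mode the indices
# are the deterministic centers of 4 equal chunks; in training mode they are
# drawn randomly within those chunks:
#
#   compute_indices(num_frames_orig=12, num_frames_target=4, is_train=False)
#   # -> array([1, 4, 7, 10])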
def truncated_normal_fill(shape: Tuple[int],
mean: float = 0,
std: float = 1,
limit: float = 2) -> torch.Tensor:
num_examples = 8
tmp = torch.empty(shape + (num_examples, )).normal_()
valid = (tmp < limit) & (tmp > -limit)
_, ind = valid.max(-1, keepdim=True)
return tmp.gather(-1, ind).squeeze(-1).mul_(std).add_(mean)
def retrieval_results_to_str(results: Dict[str, float], name: str):
return ("{:7s} | {:.3f} | {:.3f} | {:.3f} | {:.3f} | {:5.1f} | "
"{:5.1f} | {:6.3f}").format(name, *[results[a] for a in EVALKEYS])
# def compute_retr_vid_to_par(video_feat, cap_feat):
# similarity_scores = np.dot(video_feat, cap_feat.T)
# return compute_retrieval_metrics(similarity_scores)
def compute_retr_vid_to_par(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(video_feat, cap_feat.T)
return compute_retrieval_cosine(d, num_points)
def compute_retr_vid_to_par_softneighbor(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(video_feat, cap_feat.T)
return compute_retrieval_softneighbor(d, num_points)
def compute_retr_par_to_vid_softneighbor(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(cap_feat, video_feat.T)
return compute_retrieval_softneighbor(d, num_points)
def compute_retr_par_to_vid(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(cap_feat, video_feat.T)
return compute_retrieval_cosine(d, num_points)
# def compute_retr_par_to_vid(video_feat, cap_feat):
# similarity_scores = np.dot(cap_feat, video_feat.T)
# return compute_retrieval_metrics(similarity_scores)
def compute_retrieval_coarse_to_fine(coarse_ind, x_feat, y_feat):
len_dot_product = x_feat.shape[0]
dot_product = np.dot(x_feat, y_feat.T)
ranks = np.zeros(len_dot_product)
top1 = np.zeros(len_dot_product)
ind_coarse_to_fine = []
sum_corr = 0
group_k = 10
for index in range(len_dot_product):
ind_coarse = index // group_k
ind_fine = index - ind_coarse * group_k
ind_h = coarse_ind[ind_coarse]
if ind_h == ind_coarse:
# print("correct")
sum_corr += 1
inds = np.argsort(dot_product[index, ind_coarse * group_k : (ind_coarse + 1) * group_k])[::-1]
# print(inds, ind_fine)
where = np.where(inds == ind_fine)
rank = where[0][0]
else:
rank = 11
inds = [0]
ranks[index] = rank
#print(inds[0])
top1[index] = inds[0]
#print(sum_corr / len(ranks))
# print(ranks)
r1 = len(np.where(ranks < 1)[0]) / len(ranks)
r5 = len(np.where(ranks < 5)[0]) / len(ranks)
r10 = len(np.where(ranks < 10)[0]) / len(ranks)
r50 = len(np.where(ranks < 50)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
report_dict = dict()
report_dict['r1'] = r1
report_dict['r5'] = r5
report_dict['r10'] = r10
report_dict['r50'] = r50
report_dict['medr'] = medr
report_dict['meanr'] = meanr
report_dict['sum'] = r1 + r5 + r50
return report_dict, top1
def compute_retrieval_softneighbor(dot_product, len_dot_product):
ranks = np.zeros(len_dot_product)
top1 = np.zeros(len_dot_product)
sn_margin = 5 #neighborhood margin
for index in range(len_dot_product):
inds = np.argsort(dot_product[index])[::-1]
sn_inds = []
for i_sn in range(-sn_margin, sn_margin + 1):
idx_sn = min(len_dot_product - 1, max(0, (index + i_sn)))
where = np.where(inds == idx_sn)
#print(i_sn, idx_sn)
#print(index, i_sn, idx_sn, where)
sn_inds.append(where[0][0])
rank = sn_inds[np.argsort(sn_inds)[0]]
#print(sn_inds, rank)
#print("=="*20)
ranks[index] = rank
top1[index] = inds[0]
#print(sum(ranks < 0))
r1 = len(np.where(ranks < 1)[0]) / len(ranks)
r5 = len(np.where(ranks < 5)[0]) / len(ranks)
r10 = len(np.where(ranks < 10)[0]) / len(ranks)
r50 = len(np.where(ranks < 50)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
report_dict = dict()
report_dict['r1'] = r1
report_dict['r5'] = r5
report_dict['r10'] = r10
report_dict['r50'] = r50
report_dict['medr'] = medr
report_dict['meanr'] = meanr
report_dict['sum'] = r1 + r5 + r50
#print("R1 {}, R5 {}, R10 {}".format(r1, r5, r10))
return report_dict, ranks
def compute_retrieval_cosine(dot_product, len_dot_product):
ranks = np.zeros(len_dot_product)
top1 = np.zeros(len_dot_product)
ind_coarse_to_fine = []
for index in range(len_dot_product):
inds = np.argsort(dot_product[index])[::-1]
inds_org = np.argmax(dot_product[index])
where = np.where(inds == index)
ind_coarse_to_fine.append(inds_org)
rank = where[0][0]
ranks[index] = rank
top1[index] = inds[0]
r1 = len(np.where(ranks < 1)[0]) / len(ranks)
r5 = len(np.where(ranks < 5)[0]) / len(ranks)
r10 = len(np.where(ranks < 10)[0]) / len(ranks)
r50 = len(np.where(ranks < 50)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
report_dict = dict()
report_dict['r1'] = r1
report_dict['r5'] = r5
report_dict['r10'] = r10
report_dict['r50'] = r50
report_dict['medr'] = medr
report_dict['meanr'] = meanr
report_dict['sum'] = r1 + r5 + r50
return report_dict, top1, ind_coarse_to_fine
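# Illustrative usage sketch (added for clarity, not part of the original
# module): with L2-normalized embeddings of equal length, video-to-paragraph
# retrieval metrics can be computed and printed as
#
#   video_feat = np.random.randn(100, 256)
#   cap_feat = np.random.randn(100, 256)
#   video_feat /= np.linalg.norm(video_feat, axis=1, keepdims=True)
#   cap_feat /= np.linalg.norm(cap_feat, axis=1, keepdims=True)
#   metrics, top1, coarse_ind = compute_retr_vid_to_par(video_feat, cap_feat)
#   print(retrieval_results_to_str(metrics, "Vid2Par"))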
def compute_retrieval_metrics(dot_product):
sort_similarity = np.sort(-dot_product, axis=1)
diag_similarity = np.diag(-dot_product)
diag_similarity = diag_similarity[:, np.newaxis]
ranks = sort_similarity - diag_similarity
ranks = np.where(ranks == 0)
ranks = ranks[1]
report_dict = dict()
report_dict['r1'] = float(np.sum(ranks == 0)) / len(ranks)
report_dict['r5'] = float(np.sum(ranks < 5)) / len(ranks)
report_dict['r10'] = float(np.sum(ranks < 10)) / len(ranks)
report_dict['r50'] = float(np.sum(ranks < 50)) / len(ranks)
report_dict['medr'] = np.median(ranks) + 1
report_dict['meanr'] = ranks.mean()
report_dict[
'sum'] = report_dict['r1'] + report_dict['r5'] + report_dict['r50']
return report_dict, ranks
def get_logging_formatter():
return logging.Formatter("%(asctime)s %(levelname)s %(message)s",
datefmt="%m%d %H%M%S")
def get_timestamp_for_filename():
ts = str(datetime.datetime.now()).split(".")[0].replace(" ", "_")
ts = ts.replace(":", "_").replace("-", "_")
return ts
def get_logger_without_file(name, log_level="INFO") -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(log_level)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(get_logging_formatter())
logger.addHandler(strm_hdlr)
return logger
def get_logger(logdir, name, filename="run", log_level="INFO",
log_file=True) -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(log_level)
formatter = get_logging_formatter()
if log_file:
file_path = Path(logdir) / "{}_{}.log".format(
filename,
str(datetime.datetime.now()).split(".")[0].replace(
" ", "_").replace(":", "_").replace("-", "_"))
file_hdlr = logging.FileHandler(str(file_path))
file_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(formatter)
logger.addHandler(strm_hdlr)
logger.propagate = False
return logger
def close_logger(logger: logging.Logger):
x = list(logger.handlers)
for i in x:
logger.removeHandler(i)
i.flush()
i.close()
# ---------- Profiling ----------
def profile_gpu_and_ram() -> Tuple[List[str], List[float], List[float], List[float], float, float, float]:
"""
Profile GPU and RAM.
Returns:
GPU names, total / used memory per GPU, load per GPU, total / used / available RAM.
"""
# get info from gputil
_str, dct_ = _get_gputil_info()
dev_num = os.getenv("CUDA_VISIBLE_DEVICES")
if dev_num is not None:
# single GPU set with OS flag
gpu_info = [dct_[int(dev_num)]]
else:
# possibly multiple gpus, aggregate values
gpu_info = []
for dev_dict in dct_:
gpu_info.append(dev_dict)
# convert to GPU info and MB to GB
gpu_names: List[str] = [gpu["name"] for gpu in gpu_info]
total_memory_per: List[float] = [gpu["memoryTotal"] / 1024 for gpu in gpu_info]
used_memory_per: List[float] = [gpu["memoryUsed"] / 1024 for gpu in gpu_info]
load_per: List[float] = [gpu["load"] / 100 for gpu in gpu_info]
# get RAM info and convert to GB
mem = psutil.virtual_memory()
ram_total: float = mem.total / 1024 ** 3
ram_used: float = mem.used / 1024 ** 3
ram_avail: float = mem.available / 1024 ** 3
return gpu_names, total_memory_per, used_memory_per, load_per, ram_total, ram_used, ram_avail
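# Illustrative usage sketch (added for clarity, not part of the original module):
#
#   names, mem_total, mem_used, load, ram_total, ram_used, ram_avail = \
#       profile_gpu_and_ram()
#   for name, total, used, gpu_load in zip(names, mem_total, mem_used, load):
#       print(f"{name}: {used:.1f}/{total:.1f} GB, load {gpu_load:.0%}")
#   print(f"RAM: {ram_used:.1f}/{ram_total:.1f} GB ({ram_avail:.1f} GB free)")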
def _get_gputil_info():
"""
Returns info string for printing and list with gpu infos. Better formatting than the original GPUtil.
Returns:
gpu info string, List[Dict()] of values. dict example:
('id', 1),
('name', 'GeForce GTX TITAN X'),
('temperature', 41.0),
('load', 0.0),
('memoryUtil', 0.10645266950540452),
('memoryTotal', 12212.0)])]
"""
gpus = GPUtil.getGPUs()
attr_list = [
{'attr': 'id', 'name': 'ID'}, {'attr': 'name', 'name': 'Name'},
{'attr': 'temperature', 'name': 'Temp', 'suffix': 'C', 'transform': lambda x: x, 'precision': 0},
{'attr': 'load', 'name': 'GPU util.', 'suffix': '% GPU', 'transform': lambda x: x * 100,
'precision': 1},
{'attr': 'memoryUtil', 'name': 'Memory util.', 'suffix': '% MEM', 'transform': lambda x: x * 100,
'precision': 1}, {'attr': 'memoryTotal', 'name': 'Memory total', 'suffix': 'MB', 'precision': 0},
{'attr': 'memoryUsed', 'name': 'Memory used', 'suffix': 'MB', 'precision': 0}
]
gpu_strings = [''] * len(gpus)
gpu_info = []
for _ in range(len(gpus)):
gpu_info.append({})
for attrDict in attr_list:
attr_precision = '.' + str(attrDict['precision']) if (
'precision' in attrDict.keys()) else ''
attr_suffix = str(attrDict['suffix']) if (
'suffix' in attrDict.keys()) else ''
attr_transform = attrDict['transform'] if (
'transform' in attrDict.keys()) else lambda x: x
for gpuIdx, gpu in enumerate(gpus):
attr_name = attrDict['attr']
attr = getattr(gpu, attr_name)
attr = attr_transform(attr)
if isinstance(attr, float):
attr_str = ('{0:' + attr_precision + 'f}').format(attr)
elif isinstance(attr, int):
attr_str = ('{0:' + 'd}').format(attr)
elif isinstance(attr, str):
attr_str = ('{0:' + 's}').format(attr)
else:
raise TypeError(
'Unhandled object type (' + str(
type(attr)) + ') for attribute \'' + attrDict[
'name'] + '\'')
attr_str += attr_suffix
gpu_info[gpuIdx][attr_name] = attr
gpu_strings[gpuIdx] += '| ' + attr_str + ' '
return "\n".join(gpu_strings), gpu_info
|
the-stack_0_9748 | #!/usr/bin/env python
from __future__ import print_function
import itertools
from sympy import symbols, simplify_logic
TMPL_ADD = '''def {name}({args}):
if {body}
else:
raise Exception('{name}: Unhandled case "{{}}"'.format({args}))'''
def _sym_to_py(expr):
try:
if expr.is_Symbol:
return expr.name
elif expr.is_Function:
name = str(expr.func)
if name == 'And':
return '(' + ' & '.join(sym_to_py(a) for a in expr.args) + ')'
elif name == 'Xor':
return '(' + ' ^ '.join(sym_to_py(a) for a in expr.args) + ')'
elif name == 'Or':
return '(' + ' | '.join(sym_to_py(a) for a in expr.args) + ')'
elif name == 'Not':
assert len(expr.args) == 1
return '(~{})'.format(sym_to_py(expr.args[0]))
else:
raise Exception('Operator "{}" missing'.format(name))
else:
return str(bool(expr))
except Exception as e:
print(e)
import IPython; IPython.embed()
def sym_to_py(expr):
expr_simp = simplify_logic(expr)
# Stupid heuristics
if expr.count_ops() > expr_simp.count_ops():
expr = expr_simp
return _sym_to_py(expr).replace('True', '_one').replace('False', '_zero')
def bool_to_cond(b):
if b is None:
return 'X'
elif b:
return 'I'
else:
return 'O'
TMPL_COND = '''{cond}:
return {stmts}'''
def mk_funk(funk, vars, exprs_tmpls):
nn = ['ss[{}]'.format(i) for i in range(vars)]
ii = ['b{}'.format(i) for i in range(vars)]
ss = symbols(' '.join(ii))
conds = []
body = []
exprs = []
for e in exprs_tmpls:
exprs.append(eval(e.format(*nn)))
    for vv in itertools.product((False, True, None), repeat=vars):
s = dict((si, vi if vi is not None else si) for si, vi in zip(ss, vv))
cond = ' and '.join('{}.{}'.format(n, bool_to_cond(v)) for n, v in zip(ii, vv))
conds.append(cond)
body.append(tuple(sym_to_py(e.subs(s)) for e in exprs))
stmts = [TMPL_COND.format(cond=cond, stmts=', '.join(stmts)) for cond, stmts in zip(conds, body)]
stmts = '\n elif '.join(stmts)
return TMPL_ADD.format(
name=funk,
args=', '.join(ii),
body=stmts,
)
def main():
print('Generating the functions...')
defs = (
('_bit_add', 3, ['{0} ^ {1} ^ {2}', '({0} & {1}) | ({2} & ({0} ^ {1}))']),
)
funks = []
for d in defs:
print('[+] Making "{}"'.format(d[0]))
funks.append(mk_funk(*d))
src = 'from ._tbits import _zero, _one\n\n\n' + '\n\n'.join(funks) + '\n'
print('Writing to file...')
with open('./tbits/_gen_tables.py', 'w') as f:
f.write(src)
print('End of this giant hack :D')
if __name__ == '__main__':
main()
|
the-stack_0_9754 | # Copyright (c) 2021 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
from typing import Optional, Union
import ipywidgets as widgets
import numpy as np
from geoh5py.data import FloatData, IntegerData, ReferencedData
from geoh5py.objects.object_base import ObjectBase
from geoh5py.workspace import Workspace
from ipywidgets import Dropdown, FloatText, SelectMultiple, VBox
from geoapps.base import BaseApplication
from geoapps.utils import utils
class ObjectDataSelection(BaseApplication):
"""
Application to select an object and corresponding data
"""
defaults = {}
_data = None
_objects = None
_add_groups = False
_select_multiple = False
_object_types = None
_find_label = []
def __init__(self, **kwargs):
self._data_panel = None
super().__init__(**kwargs)
@property
def add_groups(self):
"""
bool: Add data groups to the list of data choices
"""
return self._add_groups
@add_groups.setter
def add_groups(self, value):
        assert isinstance(value, (bool, str)), "add_groups must be of type bool or str"
self._add_groups = value
@property
def data(self) -> Union[Dropdown, SelectMultiple]:
"""
Data selector
"""
if getattr(self, "_data", None) is None:
if self.select_multiple:
self._data = SelectMultiple(
description="Data: ",
)
else:
self._data = Dropdown(
description="Data: ",
)
if self._objects is not None:
self.update_data_list(None)
return self._data
@data.setter
def data(self, value):
assert isinstance(
value, (Dropdown, SelectMultiple)
), f"'Objects' must be of type {Dropdown} or {SelectMultiple}"
self._data = value
@property
def data_panel(self) -> VBox:
if getattr(self, "_data_panel", None) is None:
self._data_panel = VBox([self.objects, self.data])
return self._data_panel
@property
def main(self) -> VBox:
"""
:obj:`ipywidgets.VBox`: A box containing all widgets forming the application.
"""
self.__populate__(**self.defaults)
if self._main is None:
self._main = self.data_panel
self.update_data_list(None)
return self._main
@property
def objects(self) -> Dropdown:
"""
Object selector
"""
if getattr(self, "_objects", None) is None:
self.objects = Dropdown(description="Object:")
return self._objects
@objects.setter
def objects(self, value):
assert isinstance(value, Dropdown), f"'Objects' must be of type {Dropdown}"
self._objects = value
self._objects.observe(self.update_data_list, names="value")
@property
def object_types(self):
"""
Entity type
"""
if getattr(self, "_object_types", None) is None:
self._object_types = []
return self._object_types
@object_types.setter
def object_types(self, entity_types):
if not isinstance(entity_types, list):
entity_types = [entity_types]
for entity_type in entity_types:
assert issubclass(
entity_type, ObjectBase
), f"Provided object_types must be instances of {ObjectBase}"
self._object_types = tuple(entity_types)
@property
def find_label(self):
"""
Object selector
"""
if getattr(self, "_find_label", None) is None:
return []
return self._find_label
@find_label.setter
def find_label(self, values):
"""
Object selector
"""
if not isinstance(values, list):
values = [values]
for value in values:
assert isinstance(
value, str
), f"Labels to find must be strings. Value {value} of type {type(value)} provided"
self._find_label = values
@property
def select_multiple(self):
"""
        bool: Allow selecting multiple data
"""
if getattr(self, "_select_multiple", None) is None:
self._select_multiple = False
return self._select_multiple
@select_multiple.setter
def select_multiple(self, value):
if getattr(self, "_data", None) is not None:
options = self._data.options
else:
options = []
self._select_multiple = value
if value:
self._data = SelectMultiple(description="Data: ", options=options)
else:
self._data = Dropdown(description="Data: ", options=options)
@property
def workspace(self) -> Optional[Workspace]:
"""
Target geoh5py workspace
"""
if (
getattr(self, "_workspace", None) is None
and getattr(self, "_h5file", None) is not None
):
self.workspace = Workspace(self.h5file)
return self._workspace
@workspace.setter
def workspace(self, workspace):
assert isinstance(workspace, Workspace), f"Workspace must of class {Workspace}"
self._workspace = workspace
self._h5file = workspace.h5file
# Refresh the list of objects
self.update_objects_list()
def get_selected_entities(self):
"""
Get entities from an active geoh5py Workspace
"""
if getattr(self, "_workspace", None) is not None and self._workspace.get_entity(
self.objects.value
):
for entity in self._workspace.get_entity(self.objects.value):
if isinstance(entity, ObjectBase):
obj = entity
if isinstance(self.data, Dropdown):
values = [self.data.value]
else:
values = self.data.value
data = []
for value in values:
if obj.get_data(value):
data += obj.get_data(value)
elif any([pg.name == value for pg in obj.property_groups]):
data += [
self.workspace.get_entity(prop)[0]
for prop in obj.find_or_create_property_group(
name=value
).properties
]
return obj, data
else:
return None, None
def update_data_list(self, _):
self.refresh.value = False
if getattr(self, "_workspace", None) is not None and self._workspace.get_entity(
self.objects.value
):
for entity in self._workspace.get_entity(self.objects.value):
if isinstance(entity, ObjectBase):
obj = entity
if getattr(obj, "get_data_list", None) is None:
return
options = [""]
if (self.add_groups or self.add_groups == "only") and obj.property_groups:
options = (
options
+ ["-- Groups --"]
+ [p_g.name for p_g in obj.property_groups]
)
if self.add_groups != "only":
data_list = obj.get_data_list()
options = (
options
+ ["--- Channels ---"]
+ [
obj.get_data(uid)[0].name
for uid in data_list
if isinstance(obj.get_data(uid)[0], (IntegerData, FloatData))
]
+ ["Z"]
)
value = self.data.value
self.data.options = options
if self.select_multiple and any([val in options for val in value]):
self.data.value = [val for val in value if val in options]
elif value in options:
self.data.value = value
elif self.find_label:
self.data.value = utils.find_value(self.data.options, self.find_label)
else:
self.data.options = []
self.refresh.value = True
def update_objects_list(self):
if getattr(self, "_workspace", None) is not None:
value = self.objects.value
if len(self.object_types) > 0:
options = [["", None]] + [
[obj.name, obj.uid]
for obj in self._workspace.objects
if isinstance(obj, self.object_types)
]
else:
options = [["", None]] + [
[value, uid]
for uid, value in self._workspace.list_objects_name.items()
]
if value in list(dict(options).values()): # Silent update
self.objects.unobserve(self.update_data_list, names="value")
self.objects.options = options
self.objects.value = value
self._objects.observe(self.update_data_list, names="value")
else:
self.objects.options = options
class LineOptions(ObjectDataSelection):
"""
Unique lines selection from selected data channel
"""
defaults = {"find_label": "line"}
_multiple_lines = None
def __init__(self, **kwargs):
self.defaults = self.update_defaults(**kwargs)
super().__init__(**self.defaults)
self.objects.observe(self.update_data_list, names="value")
self.data.observe(self.update_line_list, names="value")
self.data.description = "Lines field"
@property
def main(self):
if self._main is None:
self._main = VBox([self._data, self.lines])
return self._main
@property
def lines(self):
"""
Widget.SelectMultiple or Widget.Dropdown
"""
if getattr(self, "_lines", None) is None:
if self.multiple_lines:
self._lines = widgets.SelectMultiple(
description="Select lines:",
)
else:
self._lines = widgets.Dropdown(
description="Select line:",
)
return self._lines
@property
def multiple_lines(self):
if getattr(self, "_multiple_lines", None) is None:
self._multiple_lines = True
return self._multiple_lines
@multiple_lines.setter
def multiple_lines(self, value):
assert isinstance(
value, bool
), f"'multiple_lines' property must be of type {bool}"
self._multiple_lines = value
def update_line_list(self, _):
_, data = self.get_selected_entities()
if data and getattr(data[0], "values", None) is not None:
if isinstance(data[0], ReferencedData):
self.lines.options = [""] + list(data[0].value_map.map.values())
else:
self.lines.options = [""] + np.unique(data[0].values).tolist()
class TopographyOptions(ObjectDataSelection):
"""
Define the topography used by the inversion
"""
def __init__(
self, option_list=["None", "Object", "Relative to Sensor", "Constant"], **kwargs
):
self.defaults = self.update_defaults(**kwargs)
self.find_label = ["topo", "dem", "dtm", "elevation", "Z"]
self._offset = FloatText(description="Vertical offset (+ve up)")
self._constant = FloatText(
description="Elevation (m)",
)
self.option_list = {
"None": widgets.Label("No topography"),
"Object": self.data_panel,
"Relative to Sensor": self._offset,
"Constant": self._constant,
}
self._options = widgets.RadioButtons(
options=option_list,
description="Define by:",
)
self.options.observe(self.update_options)
super().__init__(**self.defaults)
@property
def panel(self):
return self._panel
@property
def constant(self):
return self._constant
@property
def main(self):
if self._main is None:
self._main = VBox([self.options, self.option_list[self.options.value]])
return self._main
@property
def offset(self):
return self._offset
@property
def options(self):
return self._options
def update_options(self, _):
self.main.children = [
self.options,
self.option_list[self.options.value],
]
|
the-stack_0_9755 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 13:01:35 2019
@author: avelinojaver
"""
import sys
from pathlib import Path
root_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(root_dir))
from cell_localization.flow import CoordFlow, collate_simple
from cell_localization.models import get_model
import tqdm
from torch.utils.data import DataLoader
import torch
import numpy as np
import matplotlib.pylab as plt
if __name__ == '__main__':
#%%
root_dir = '/Users/avelinojaver/OneDrive - Nexus365/bladder_cancer_tils/eosinophils/training/20x/'
num_workers = 4
batch_size = 4#512
gauss_sigma = 1.5
device = 'cpu'
flow_args = dict(
roi_size = 16,
scale_int = (0, 4095),
prob_unseeded_patch = 0.5,
zoom_range = (0.97, 1.03),
int_aug_offset = (-0.2, 0.2),
int_aug_expansion = (0.7, 1.3),
samples_per_epoch = batch_size*100
)
gen = CoordFlow(root_dir, **flow_args)
loader = DataLoader(gen,
batch_size = batch_size,
shuffle = True,
num_workers = num_workers,
collate_fn = collate_simple
)
model = get_model('ind+clf+unet-simple', 3, 2, 'maxlikelihood')
for images, targets in tqdm.tqdm(loader):
images = torch.from_numpy(np.stack(images)).to(device)
targets = [{k: torch.from_numpy(v).to(device) for k, v in target.items()} for target in targets]
#%%
model.train()
losses = model(images, targets)
loss = sum([v for v in losses.values()])
loss.backward()
#%%
model.eval()
losses, predictions = model(images, targets)
break
#%%
# import torch.nn.functional as F
# xhat, features = model.mapping_network(images)
#
#
#
# #I want to get a map to indicate if there is an cell or not
# feats = features[0].permute((0, 2, 3, 1))
# n_batch, clf_h, clf_w, clf_n_filts = feats.shape
# feats = feats.contiguous().view(-1, clf_n_filts, 1, 1)
# clf_scores = model.clf_patch_head(feats)
# #scores, has_cells = clf_scores.max(dim=1)
# clf_scores = F.softmax(clf_scores, dim = 1)
# clf_scores = clf_scores[:, 1].view(n_batch, 1, clf_h, clf_w)
#
#
# clf_scores = F.interpolate(clf_scores, size = xhat.shape[-2:], mode = 'bilinear', align_corners=False)
#
#
# bad = clf_scores< 0.5
# xhat[bad] = xhat[bad].mean()
# xhat = model.preevaluation(xhat)
# outs = model.nms(xhat)
# #%%
# proposals = []
#
# mm = xhat.detach().numpy()
# for m, pred in zip(mm, outs):
# pred_coords = pred[0]
# boxes = torch.cat((pred_coords - model.proposal_half_size, pred_coords + model.proposal_half_size), dim = -1)
# proposals.append(boxes)
#
#
# from matplotlib import patches
#
# fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize = (10, 10))
#
#
# ax.imshow(m)
# for box in boxes:
# cm, w, l = (box[0], box[1]), box[2] - box[0], box[3] - box[1]
# rect = patches.Rectangle(cm, w, l,linewidth=1,edgecolor='r',facecolor='none')
# ax.add_patch(rect)
# break
|
the-stack_0_9757 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
seg={'a':21, 'b':8, 'c':11, 'd':26, 'e':19, 'f':20, 'g':13}
for s in "abcdefg":
GPIO.setup(seg[s], GPIO.OUT, initial=0)
zif=[16, 12, 7, 6]
for z in zif:
GPIO.setup(z, GPIO.OUT, initial=1)
dp = 5
GPIO.setup(dp, GPIO.OUT, initial=0)
zahl = [
"abcdef", #0
"bc", #1
"abdeg", #2
"abcdg", #3
"bcfg", #4
"acdfg", #5
"acdefg", #6
"abc", #7
"abcdefg", #8
"abcdfg" #9
]
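# Each string lists which of the seven segments (a-g) must be lit to display
# the digit equal to its index, e.g. zahl[1] = "bc" lights only the two
# right-hand segments to show a "1".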
z = [0, 0, 0, 0]
print("Ctrl+C stops the program.")
def za():
for i in range(4):
for s in "abcdefg":
GPIO.output(seg[s], 0)
GPIO.output(zif[i], 0)
for s in zahl[z[i]]:
GPIO.output(seg[s], 1)
if i == 1:
GPIO.output(dp, 1)
else:
GPIO.output(dp, 0)
time.sleep(0.005)
GPIO.output(zif[i], 1)
try:
while True:
t = time.localtime()
h = t.tm_hour
m = t.tm_min
z[0]=int(h / 10)
z[1]=h % 10
z[2]=int(m / 10)
z[3]=m % 10
while time.localtime().tm_min == m:
za()
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
|
the-stack_0_9759 | from itertools import tee, zip_longest
from django.db.models import Model
from typing import Any, Iterator, List, Sequence, Type, TypeVar, Tuple
T = TypeVar('T', covariant=True)
def pairwise(iterable: Sequence[T]) -> Iterator[Tuple[T, T]]:
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def modelname(model: Type[Model]) -> str:
return f'{model._meta.app_label}.{model._meta.model_name}'
def is_sublist(needle: Sequence[Any], haystack: Sequence[Any]) -> bool:
if not needle:
return True
if not haystack:
return False
max_k = len(needle) - 1
k = 0
for elem in haystack:
if elem != needle[k]:
k = 0
continue
if k == max_k:
return True
k += 1
return False
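# Illustrative examples (added for clarity, not part of the original module):
#
#   list(pairwise([1, 2, 3, 4]))      # -> [(1, 2), (2, 3), (3, 4)]
#   is_sublist([2, 3], [1, 2, 3, 4])  # -> True
#   is_sublist([3, 2], [1, 2, 3, 4])  # -> False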
def parent_to_inherited_path(parent: Type[Model], inherited: Type[Model]) -> List[str]:
"""
Pull relation path segments from `parent` to `inherited` model
in multi table inheritance.
"""
bases = inherited._meta.get_base_chain(parent)
relations: List[str] = []
model = inherited
for base in bases:
relations.append(model._meta.parents[base].remote_field.name)
model = base
return relations[::-1]
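# Illustrative sketch (added for clarity) using hypothetical multi table
# inheritance models that are not defined in this module:
#
#   class Place(models.Model): ...
#   class Restaurant(Place): ...
#   class Cafeteria(Restaurant): ...
#
#   parent_to_inherited_path(Place, Cafeteria)
#   # would yield ['restaurant', 'cafeteria'], the implicit parent-link
#   # relation names from Place down to Cafeteria.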
def skip_equal_segments(ps: Sequence[str], rs: Sequence[str]) -> List[str]:
"""
Skips all equal segments from the beginning of `ps` and `rs`
returning left over segments from `ps`.
"""
add: bool = False
ret: List[str] = []
for left, right in zip_longest(ps, rs):
if left is None:
break
if left != right:
add = True
if add:
ret.append(left)
return ret
|
the-stack_0_9760 | # Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mdts.lib.binding_manager import BindingManager
from mdts.lib.physical_topology_manager import PhysicalTopologyManager
from mdts.lib.virtual_topology_manager import VirtualTopologyManager
from mdts.tests.utils.asserts import async_assert_that
from mdts.tests.utils.asserts import receives
from mdts.tests.utils.asserts import should_NOT_receive
from mdts.tests.utils.asserts import within_sec
from mdts.tests.utils.utils import bindings
from mdts.tests.utils.utils import wait_on_futures
from nose.plugins.attrib import attr
import logging
import random
import time
LOG = logging.getLogger(__name__)
PTM = PhysicalTopologyManager(
'../topologies/mmm_physical_test_conn_tracking.yaml')
VTM = VirtualTopologyManager(
'../topologies/mmm_virtual_test_conn_tracking.yaml')
BM = BindingManager(PTM, VTM)
binding_multihost = {
'description': 'spanning across multiple MMs',
'bindings': [
{'binding':
{'device_name': 'bridge-000-001', 'port_id': 2,
'host_id': 1, 'interface_id': 1}},
{'binding':
{'device_name': 'bridge-000-001', 'port_id': 3,
'host_id': 2, 'interface_id': 2}},
]
}
def set_bridge_port_filters(bridge_name, port_id, inbound_filter_name,
outbound_filter_name):
    '''Sets in-bound and out-bound filters on a bridge port.'''
bridge_port = VTM.get_device_port(bridge_name, port_id)
inbound_filter = None
if inbound_filter_name:
inbound_filter = VTM.get_chain(inbound_filter_name)
outbound_filter = None
if outbound_filter_name:
outbound_filter = VTM.get_chain(outbound_filter_name)
bridge_port.set_inbound_filter(inbound_filter)
bridge_port.set_outbound_filter(outbound_filter)
# Sleep here to make sure that the settings have been propagated.
time.sleep(5)
def unset_bridge_port_filters(bridge_name, port_id):
    '''Clears the in-bound and out-bound filters on a bridge port.'''
set_bridge_port_filters(bridge_name, port_id, None, None)
def get_random_port_num():
'''Returns a random port number from a free port range.
NOTE: Using a random number may cause test indeterminacy on a rare occasion.
'''
return random.randint(49152, 65535)
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_filtering_by_network_address():
'''
Title: Tests packets filtering based on network address
Scenario:
When: A VM sends UDP packets to another host on the same bridge.
Then: The UDP packets reaches the receiver.
Then: Filtering rule chains based on network address (IP address) are set on
the bridge port that the receiver host is connected to.
And: The UDP packets from the same sender do NOT reach the receiver.
'''
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-001', 3)
# Reset in/out-bound filters.
unset_bridge_port_filters('bridge-000-001', 3)
port_num = get_random_port_num()
    # FIXME: do not use hardcoded values!
f1 = async_assert_that(receiver,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'No filtering: receives UDP packets from sender.')
f2 = sender.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Set a filtering rule based on network address.
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_nw_in',
'connection_tracking_nw_out')
f1 = async_assert_that(receiver, should_NOT_receive(
'dst host 172.16.1.2 and udp',
within_sec(5)),
'Packets are filtered based on IP address.')
f2 = sender.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_connection_tracking_by_network_address():
'''
Title: Tests NW address based connection tracking.
Scenario:
When: A VM, supposedly inside a FW, sends UDP packets to another host,
          supposedly outside the FW, on the same bridge.
And: The host outside the FW receives the UDP packets.
Then: A connection-tracking-based peep hole is established.
    And: The outside host can now send UDP packets to the inside host.
'''
outside = BM.get_iface_for_port('bridge-000-001', 2)
inside = BM.get_iface_for_port('bridge-000-001', 3)
# Set a filtering rule based on ip address.
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_nw_in',
'connection_tracking_nw_out')
# Send forward packets to set up a connection-tracking based peep hole in
# the filter.
port_num = get_random_port_num()
f1 = async_assert_that(outside,
receives('dst host 172.16.1.1 and udp',
within_sec(5)),
'Outside host receives forward packets from inside.')
f2 = inside.send_udp('aa:bb:cc:00:01:01', '172.16.1.1', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Verify the peep hole.
f1 = async_assert_that(inside,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'Outside host can send packets to inside '
'via a peep hole.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_filtering_by_dl():
'''
Title: Tests dl-based packet filtering.
Scenario:
When: A VM sends UDP packets to another host on the same bridge.
Then: The UDP packets reach the receiver without filtering rule chains.
Then: A filtering rule chain based on mac address is set on the bridge.
And: UDP packets from the same host do NOT reach the same destination host.
'''
outside = BM.get_iface_for_port('bridge-000-001', 2)
inside = BM.get_iface_for_port('bridge-000-001', 3)
# Reset an in-bound filter.
unset_bridge_port_filters('bridge-000-001', 3)
port_num = get_random_port_num()
f1 = async_assert_that(
inside,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'No filtering: inside receives UDP packets from outside.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Set a filtering rule based on mac addresses
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_dl_in',
'connection_tracking_dl_out')
f1 = async_assert_that(inside,
should_NOT_receive(
'dst host 172.16.1.2 and udp',
within_sec(5)),
'Packets are filtered based on mac address.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_connection_tracking_with_drop_by_dl():
'''
Title: Tests dl-based connection tracking.
Scenario:
When: A VM inside a FW sends UDP packets to a VM outside.
And: The outside receives the UDP packets.
Then: A connection-tracking-based peep hole is established.
And: The outside now can send UDP packets to the inside.
'''
outside = BM.get_iface_for_port('bridge-000-001', 2)
inside = BM.get_iface_for_port('bridge-000-001', 3)
# Set a filtering rule based on mac addresses
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_dl_in',
'connection_tracking_dl_out')
# Send forward packets to set up a connection-tracking based peep hole in
# the filter.
port_num = get_random_port_num()
f1 = async_assert_that(outside,
receives('dst host 172.16.1.1 and udp',
within_sec(5)),
'The outside host receives forward packets '
'from the inside.')
f2 = inside.send_udp('aa:bb:cc:00:01:01', '172.16.1.1', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Verify the peep hole.
f1 = async_assert_that(inside,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
                           'The outside host can now send packets to the inside '
'via a peep hole.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
|
the-stack_0_9761 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext, dirname, exists
from os import getenv
from distutils.spawn import find_executable
from distutils.version import LooseVersion
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS
from tools.hooks import hook_tool
from tools.utils import run_cmd, NotSupportedException
class GCC(mbedToolchain):
OFFICIALLY_SUPPORTED = True
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
    DIAGNOSTIC_PATTERN = re.compile(r'((?P<file>[^:]+):(?P<line>\d+):)(?P<col>\d+):? (?P<severity>warning|[eE]rror|fatal error): (?P<message>.+)')
GCC_RANGE = (LooseVersion("6.0.0"), LooseVersion("7.0.0"))
    GCC_VERSION_RE = re.compile(rb"\d+\.\d+\.\d+")
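    # Example of a compiler diagnostic line matched by DIAGNOSTIC_PATTERN
    # (illustrative only, not from an actual build log):
    #   main.cpp:42:10: error: 'foo' was not declared in this scope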
def __init__(self, target, notify=None, macros=None, build_profile=None,
build_dir=None):
mbedToolchain.__init__(self, target, notify, macros,
build_profile=build_profile, build_dir=build_dir)
tool_path=TOOLCHAIN_PATHS['GCC_ARM']
# Add flags for current size setting
default_lib = "std"
if hasattr(target, "default_lib"):
default_lib = target.default_lib
elif hasattr(target, "default_build"): # Legacy
default_lib = target.default_build
if default_lib == "small":
self.flags["common"].append("-DMBED_RTOS_SINGLE_THREAD")
self.flags["ld"].append("--specs=nano.specs")
if target.core == "Cortex-M0+":
self.cpu = ["-mcpu=cortex-m0plus"]
elif target.core.startswith("Cortex-M4"):
self.cpu = ["-mcpu=cortex-m4"]
elif target.core.startswith("Cortex-M7"):
self.cpu = ["-mcpu=cortex-m7"]
elif target.core.startswith("Cortex-M23"):
self.cpu = ["-mcpu=cortex-m23"]
elif target.core.startswith("Cortex-M33F"):
self.cpu = ["-mcpu=cortex-m33+nodsp"]
elif target.core.startswith("Cortex-M33"):
self.cpu = ["-march=armv8-m.main"]
else:
self.cpu = ["-mcpu={}".format(target.core.lower())]
if target.core.startswith("Cortex-M"):
self.cpu.append("-mthumb")
# FPU handling, M7 possibly to have double FPU
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7F":
self.cpu.append("-mfpu=fpv5-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7FD":
self.cpu.append("-mfpu=fpv5-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
if ((target.core.startswith("Cortex-M23") or
target.core.startswith("Cortex-M33")) and
not target.core.endswith("-NS")):
self.cpu.append("-mcmse")
self.flags["ld"].extend([
"-Wl,--cmse-implib",
"-Wl,--out-implib=%s" % join(build_dir, "cmse_lib.o")
])
elif target.core == "Cortex-M23-NS" or target.core == "Cortex-M33-NS" or target.core == "Cortex-M33F-NS":
self.flags["ld"].append("-DDOMAIN_NS=1")
self.flags["common"] += self.cpu
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc] + self.flags['asm'] + self.flags["common"]
self.cc = [main_cc]
self.cppc =[main_cppc]
self.cc += self.flags['c'] + self.flags['common']
self.cppc += self.flags['cxx'] + self.flags['common']
self.flags['ld'] += self.cpu
self.ld = [join(tool_path, "arm-none-eabi-gcc")] + self.flags['ld']
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc", "nosys"]
self.preproc = [join(tool_path, "arm-none-eabi-cpp"), "-E", "-P"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
self.use_distcc = (bool(getenv("DISTCC_POTENTIAL_HOSTS", False))
and not getenv("MBED_DISABLE_DISTCC", False))
def version_check(self):
stdout, _, retcode = run_cmd([self.cc[0], "--version"], redirect=True)
msg = None
match = self.GCC_VERSION_RE.search(stdout)
found_version = LooseVersion(match.group(0).decode('utf-8')) if match else None
min_ver, max_ver = self.GCC_RANGE
if found_version and (found_version < min_ver or found_version >= max_ver):
msg = ("Compiler version mismatch: Have {}; "
"expected version >= {} and < {}"
.format(found_version, min_ver, max_ver))
elif not match:
msg = ("Compiler version mismatch: Could not detect version; "
"expected version >= {} and < {}"
.format(min_ver, max_ver))
if msg:
self.notify.cc_info({
"message": msg,
"file": "",
"line": "",
"col": "",
"severity": "Warning",
})
def is_not_supported_error(self, output):
return "error: #error [NOT_SUPPORTED]" in output
def parse_output(self, output):
# The warning/error notification is multiline
msg = None
for line in output.splitlines():
match = self.DIAGNOSTIC_PATTERN.search(line)
if match is not None:
if msg is not None:
self.notify.cc_info(msg)
msg = None
msg = {
'severity': match.group('severity').lower(),
'file': match.group('file'),
'line': match.group('line'),
'col': match.group('col'),
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name
}
if msg is not None:
self.notify.cc_info(msg)
def get_dep_option(self, object):
base, _ = splitext(object)
dep_path = base + '.d'
return ["-MD", "-MF", dep_path]
def get_config_option(self, config_header):
return ['-include', config_header]
def get_compile_options(self, defines, includes, for_asm=False):
opts = ['-D%s' % d for d in defines]
if self.RESPONSE_FILES:
opts += ['@%s' % self.get_inc_file(includes)]
else:
opts += ["-I%s" % i for i in includes]
config_header = self.get_config_header()
if config_header is not None:
opts = opts + self.get_config_option(config_header)
return opts
@hook_tool
def assemble(self, source, object, includes):
# Build assemble command
cmd = self.asm + self.get_compile_options(self.get_symbols(True), includes) + ["-o", object, source]
# Call cmdline hook
cmd = self.hook.get_cmdline_assembler(cmd)
# Return command array, don't execute
return [cmd]
@hook_tool
def compile(self, cc, source, object, includes):
# Build compile command
cmd = cc + self.get_compile_options(self.get_symbols(), includes)
cmd.extend(self.get_dep_option(object))
cmd.extend(["-o", object, source])
# Call cmdline hook
cmd = self.hook.get_cmdline_compiler(cmd)
if self.use_distcc:
cmd = ["distcc"] + cmd
return [cmd]
def compile_c(self, source, object, includes):
return self.compile(self.cc, source, object, includes)
def compile_cpp(self, source, object, includes):
return self.compile(self.cppc, source, object, includes)
@hook_tool
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
# Preprocess
if mem_map:
preproc_output = join(dirname(output), ".link_script.ld")
cmd = (self.preproc + [mem_map] + self.ld[1:] +
[ "-o", preproc_output])
self.notify.cc_verbose("Preproc: %s" % ' '.join(cmd))
self.default_cmd(cmd)
mem_map = preproc_output
# Build linker command
map_file = splitext(output)[0] + ".map"
cmd = self.ld + ["-o", output, "-Wl,-Map=%s" % map_file] + objects + ["-Wl,--start-group"] + libs + ["-Wl,--end-group"]
if mem_map:
cmd.extend(['-T', mem_map])
for L in lib_dirs:
cmd.extend(['-L', L])
cmd.extend(libs)
# Call cmdline hook
cmd = self.hook.get_cmdline_linker(cmd)
if self.RESPONSE_FILES:
# Split link command to linker executable + response file
cmd_linker = cmd[0]
link_files = self.get_link_file(cmd[1:])
cmd = [cmd_linker, "@%s" % link_files]
# Exec command
self.notify.cc_verbose("Link: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@hook_tool
def archive(self, objects, lib_path):
if self.RESPONSE_FILES:
param = ["@%s" % self.get_arch_file(objects)]
else:
param = objects
# Exec command
self.default_cmd([self.ar, 'rcs', lib_path] + param)
@hook_tool
def binary(self, resources, elf, bin):
# Build binary command
_, fmt = splitext(bin)
bin_arg = {'.bin': 'binary', '.hex': 'ihex'}[fmt]
cmd = [self.elf2bin, "-O", bin_arg, elf, bin]
# Call cmdline hook
cmd = self.hook.get_cmdline_binary(cmd)
# Exec command
self.notify.cc_verbose("FromELF: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@staticmethod
def name_mangle(name):
return "_Z%i%sv" % (len(name), name)
@staticmethod
def make_ld_define(name, value):
return "-D%s=%s" % (name, value)
@staticmethod
def redirect_symbol(source, sync, build_dir):
return "-Wl,--defsym=%s=%s" % (source, sync)
@staticmethod
def check_executable():
"""Returns True if the executable (arm-none-eabi-gcc) location
specified by the user exists OR the executable can be found on the PATH.
Returns False otherwise."""
if not TOOLCHAIN_PATHS['GCC_ARM'] or not exists(TOOLCHAIN_PATHS['GCC_ARM']):
if find_executable('arm-none-eabi-gcc'):
TOOLCHAIN_PATHS['GCC_ARM'] = ''
return True
else:
return False
else:
exec_name = join(TOOLCHAIN_PATHS['GCC_ARM'], 'arm-none-eabi-gcc')
return exists(exec_name) or exists(exec_name + '.exe')
class GCC_ARM(GCC):
pass
|
the-stack_0_9762 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from thing.models import Contract
# We need to delete duplicate contract IDs before we make it a unique field
def delete_contract_duplicates_forward(apps, schema_editor):
ids = Contract.objects.all().values_list('contract_id', flat=True).distinct()
for contract_id in ids:
contracts = Contract.objects.filter(contract_id=contract_id)
if contracts.count() > 1:
itercontracts = iter(contracts)
next(itercontracts)
for contract in itercontracts:
contract.delete()
def delete_contract_duplicates_reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('thing', '0022_auto_20170824_1956'),
]
operations = [
migrations.RunPython(
delete_contract_duplicates_forward,
delete_contract_duplicates_reverse
),
migrations.AlterField(
model_name='contract',
name='contract_id',
field=models.IntegerField(unique=True, db_index=True),
preserve_default=True,
),
]
|
the-stack_0_9763 | from collections import namedtuple
from spinn.util.data import *
ModelSpec_ = namedtuple("ModelSpec", ["model_dim", "word_embedding_dim",
"batch_size", "vocab_size", "seq_length",
"model_visible_dim"])
def ModelSpec(*args, **kwargs):
args = dict(list(zip(ModelSpec_._fields, args)))
args.update(kwargs)
# Defaults
if "model_visible_dim" not in args:
args["model_visible_dim"] = args["model_dim"]
return ModelSpec_(**args)
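# Illustrative usage sketch (added for clarity, not part of the original
# module): positional and keyword arguments are merged, and model_visible_dim
# falls back to model_dim when it is not given explicitly:
#
#   spec = ModelSpec(128, 300, batch_size=32, vocab_size=10000, seq_length=50)
#   spec.model_visible_dim  # -> 128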
|
the-stack_0_9764 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestSliceOp(OpTest):
def setUp(self):
self.op_type = "slice"
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
'starts': self.starts,
'ends': self.ends
}
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [1, 0, 2]
self.ends = [3, 3, 4]
self.axes = [0, 1, 2]
self.out = self.input[1:3, 0:3, 2:4, :]
def test_check_output(self):
self.check_output()
class TestCase1(TestSliceOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 2]
self.out = self.input[-3:3, 0:100, 2:-1, :]
class TestCase2(TestSliceOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 3]
self.out = self.input[-3:3, 0:100, :, 2:-1]
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9765 | import pytest
from app.models import Expense, User
from app.models import expense
pytestmark = pytest.mark.nologin
def headers(tok):
return {'Authorization': f'Bearer {tok}'}
def test_get_expenses(db_with_expenses, token, client):
resp = client.get('/api/expenses?page=1&page_size=10',
headers=headers(token))
assert resp.status_code == 200
expenses = resp.get_json()
assert len(expenses) == 10
for i, e in enumerate(expenses):
assert e['description'] == f'Item {15-i}'
def test_get_expense(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item 10').first()
db_data = {
'id': exp.id,
'description': exp.description,
'amount': exp.amount_str,
'date': exp.date.isoformat(),
'payment_mode': exp.payment_mode.mode,
'estimate': exp.estimate.item if exp.estimate else None,
'tags': ','.join([tag.tagname for tag in exp.tags]),
'comments': exp.comments,
'created_on': exp.created_on.isoformat(),
'updated_on': exp.updated_on.isoformat()
}
resp = client.get(f'/api/expenses/{exp.id}',
headers=headers(token))
assert resp.status_code == 200
e = resp.get_json()
assert e == db_data
def test_update_expense(db_with_expenses, token, client):
# Following code is needed because we are accessing amount
expense.current_user = User.query.get(1)
exp = Expense.query.filter_by(description='Item 10').first()
orig_amount = exp.amount
orig_comments = exp.comments
data = {
'amount': int(orig_amount + 10),
'comments': 'Amount increased by 10'
}
resp = client.patch(f'/api/expenses/{exp.id}',
json=data,
headers=headers(token))
assert resp.status_code == 200
e = resp.get_json()
assert e['id'] == exp.id
assert e['amount'] == str(orig_amount + 10)
assert e['comments'] != orig_comments
assert e['comments'] == 'Amount increased by 10'
def test_delete_expense(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item 10').first()
resp = client.delete(f'/api/expenses/{exp.id}', headers=headers(token))
assert resp.status_code == 204
def test_delete_forbidden(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item user2').first()
resp = client.delete(f'/api/expenses/{exp.id}', headers=headers(token))
assert resp.status_code == 403
assert resp.get_json()['msg'].startswith('Forbidden')
def test_delete_not_found(db_with_expenses, token, client):
resp = client.delete('/api/expenses/50', headers=headers(token))
assert resp.status_code == 404
assert resp.get_json()['msg'] == 'Expense not found.'
|
the-stack_0_9766 | """The sma integration."""
from __future__ import annotations
from datetime import timedelta
import logging
import pysma
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SSL,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
CONF_CUSTOM,
CONF_FACTOR,
CONF_GROUP,
CONF_KEY,
CONF_UNIT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
PLATFORMS,
PYSMA_COORDINATOR,
PYSMA_DEVICE_INFO,
PYSMA_OBJECT,
PYSMA_REMOVE_LISTENER,
PYSMA_SENSORS,
)
_LOGGER = logging.getLogger(__name__)
def _parse_legacy_options(
entry: ConfigEntry, sensor_def: pysma.sensor.Sensors
) -> list[str]:
"""Parse legacy configuration options.
This will parse the legacy CONF_SENSORS and CONF_CUSTOM configuration options
to support deprecated yaml config from platform setup.
"""
# Add sensors from the custom config
sensor_def.add(
[
pysma.sensor.Sensor(
o[CONF_KEY], n, o[CONF_UNIT], o[CONF_FACTOR], o.get(CONF_PATH)
)
for n, o in entry.data.get(CONF_CUSTOM).items()
]
)
# Parsing of sensors configuration
if not (config_sensors := entry.data.get(CONF_SENSORS)):
return []
# Support import of legacy config that should have been removed from 0.99, but was still functional
# See also #25880 and #26306. Functional support was dropped in #48003
if isinstance(config_sensors, dict):
config_sensors_list = []
for name, attr in config_sensors.items():
config_sensors_list.append(name)
config_sensors_list.extend(attr)
config_sensors = config_sensors_list
# Find and replace sensors removed from pysma
# This only alters the config, the actual sensor migration takes place in _migrate_old_unique_ids
for sensor in config_sensors.copy():
if sensor in pysma.const.LEGACY_MAP:
config_sensors.remove(sensor)
config_sensors.append(pysma.const.LEGACY_MAP[sensor]["new_sensor"])
# Only sensors from config should be enabled
for sensor in sensor_def:
sensor.enabled = sensor.name in config_sensors
return config_sensors
def _migrate_old_unique_ids(
hass: HomeAssistant,
entry: ConfigEntry,
sensor_def: pysma.sensor.Sensors,
config_sensors: list[str],
) -> None:
"""Migrate legacy sensor entity_id format to new format."""
entity_registry = er.async_get(hass)
# Create list of all possible sensor names
possible_sensors = set(
config_sensors + [s.name for s in sensor_def] + list(pysma.const.LEGACY_MAP)
)
for sensor in possible_sensors:
if sensor in sensor_def:
pysma_sensor = sensor_def[sensor]
original_key = pysma_sensor.key
elif sensor in pysma.const.LEGACY_MAP:
# If sensor was removed from pysma we will remap it to the new sensor
legacy_sensor = pysma.const.LEGACY_MAP[sensor]
pysma_sensor = sensor_def[legacy_sensor["new_sensor"]]
original_key = legacy_sensor["old_key"]
else:
_LOGGER.error("%s does not exist", sensor)
continue
# Find entity_id using previous format of unique ID
entity_id = entity_registry.async_get_entity_id(
"sensor", "sma", f"sma-{original_key}-{sensor}"
)
if not entity_id:
continue
# Change unique_id to new format using the device serial in entry.unique_id
new_unique_id = f"{entry.unique_id}-{pysma_sensor.key}_{pysma_sensor.key_idx}"
entity_registry.async_update_entity(entity_id, new_unique_id=new_unique_id)
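# Illustrative before/after of the unique_id migration above (the key and
# serial shown are assumptions, not real device values):
#   old: "sma-6100_40263F00-grid_power"
#   new: "<device-serial>-6100_40263F00_0"   # entry.unique_id + key + key_idx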
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up sma from a config entry."""
# Init the SMA interface
protocol = "https" if entry.data[CONF_SSL] else "http"
url = f"{protocol}://{entry.data[CONF_HOST]}"
verify_ssl = entry.data[CONF_VERIFY_SSL]
group = entry.data[CONF_GROUP]
password = entry.data[CONF_PASSWORD]
session = async_get_clientsession(hass, verify_ssl=verify_ssl)
sma = pysma.SMA(session, url, password, group)
try:
# Get updated device info
device_info = await sma.device_info()
# Get all device sensors
sensor_def = await sma.get_sensors()
except (
pysma.exceptions.SmaReadException,
pysma.exceptions.SmaConnectionException,
) as exc:
raise ConfigEntryNotReady from exc
# Parse legacy options if initial setup was done from yaml
if entry.source == SOURCE_IMPORT:
config_sensors = _parse_legacy_options(entry, sensor_def)
_migrate_old_unique_ids(hass, entry, sensor_def, config_sensors)
# Define the coordinator
async def async_update_data():
"""Update the used SMA sensors."""
try:
await sma.read(sensor_def)
except (
pysma.exceptions.SmaReadException,
pysma.exceptions.SmaConnectionException,
) as exc:
raise UpdateFailed(exc) from exc
interval = timedelta(
seconds=entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
)
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="sma",
update_method=async_update_data,
update_interval=interval,
)
try:
await coordinator.async_config_entry_first_refresh()
except ConfigEntryNotReady:
await sma.close_session()
raise
# Ensure we logout on shutdown
async def async_close_session(event):
"""Close the session."""
await sma.close_session()
remove_stop_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_close_session
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
PYSMA_OBJECT: sma,
PYSMA_COORDINATOR: coordinator,
PYSMA_SENSORS: sensor_def,
PYSMA_REMOVE_LISTENER: remove_stop_listener,
PYSMA_DEVICE_INFO: device_info,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
data = hass.data[DOMAIN].pop(entry.entry_id)
await data[PYSMA_OBJECT].close_session()
data[PYSMA_REMOVE_LISTENER]()
return unload_ok
|
the-stack_0_9767 | import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# pad our images with zeros in both dimensions.
# each input image is rawside x rawside; each output image is outside x outside
# the input tensor should have shape [-1, rawside^2]; the output has shape [-1, outside^2]
def padImages(images, rawside, outside):
padSize = (outside - rawside)/2.0
images = [np.reshape(image, (rawside, rawside)) for image in images]
leftPad = int(np.floor(padSize))
rightPad = int(np.ceil(padSize))
    padded = np.pad(images, [[0, 0], [leftPad, rightPad], [leftPad, rightPad]], 'constant')
    return np.reshape(padded, (-1, outside * outside))
if __name__=="__main__":
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
print(padImages([[1,1,1,1],[2,2,2,2]], 2, 10))
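    # Pad the MNIST images loaded above from 28x28 to 32x32 (a sketch; the
    # 28/32 sizes are assumptions based on the standard MNIST image shape).
    padded = padImages(mnist.train.images, 28, 32)
    print(padded.shape)  # e.g. (55000, 1024)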
|
the-stack_0_9768 | def binary_Search(arr,val):
start=0
end=len(arr)-1
while start<=end:
mid=(start+end)//2
if arr[mid]==val:
return True
elif arr[mid]>val:
end=mid-1
else:
start=mid+1
return False
arr=list(map(int,input().split()))
#sort the array if not sorted
arr.sort()
#val= value to find in array
val=int(input())
if binary_Search(arr,val):
print("value found")
else:
print("value not found")
|
the-stack_0_9771 | # DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list
def generate_rounds(self):
# Let's generate our random rounds from scratch
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
if(not gender in round_data):
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
rnd_players = random.sample(players[gender], len(players[gender]))
x = 0
            for i in range(len(rnd_players) // 2):
# Grab our versus players
playerOne = rnd_players[x]
playerTwo = rnd_players[x + 1]
print("{0} vs {1} ".format(playerOne.name(), playerTwo.name()))
# Increment by 2 to avoid having duplicates
print(x)
x += 2
print(round_data)
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App() |
the-stack_0_9773 | from numpy import *
import numpy as np
import random
import math
import os
import time
import pandas as pd
import csv
# Define helper functions
def ReadMyCsv(SaveList, fileName):
csv_reader = csv.reader(open(fileName))
    for row in csv_reader:  # append each RNA-disease pair to the list; note any header row
SaveList.append(row)
return
def ReadMyCsv2(SaveList, fileName):
csv_reader = csv.reader(open(fileName))
for row in csv_reader:
counter = 0
while counter < len(row):
            row[counter] = int(row[counter])  # convert the string to int
counter = counter + 1
SaveList.append(row)
return
def StorFile(data, fileName):
with open(fileName, "w", newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(data)
return
def MyNegativeSample():
'''
    Build NegativeSample pairs from AssociationMatrix and PositiveSample.
'''
    # Load input data
AssociationMatrix = []
ReadMyCsv(AssociationMatrix, "SecondRandomShuffle\AssociationMatrix.csv")
print('AssociationMatrix[0]', AssociationMatrix[0])
print(len(AssociationMatrix))
PositiveSample = []
ReadMyCsv(PositiveSample, 'SecondRandomShuffle\PositiveSample.csv')
print(len(PositiveSample))
print(PositiveSample[0])
NegativeSample = []
counterN = 0
    while counterN < len(PositiveSample):  # randomly draw one candidate disease-RNA pair per positive sample
counter1 = random.randint(0, len(AssociationMatrix) - 1)
counter2 = random.randint(0, len(AssociationMatrix[counter1]) - 1)
flag1 = 0
counter3 = 0
        while counter3 < len(PositiveSample):  # check whether the pair already exists in the positive samples
if counter1 == PositiveSample[counter3][0] and counter2 == PositiveSample[counter3][1]:
print('fail1')
flag1 = 1
break
counter3 = counter3 + 1
if flag1 == 1:
continue
flag2 = 0
counter4 = 0
        while counter4 < len(NegativeSample):  # make sure it is not already among the chosen negative samples (avoid duplicates)
if counter1 == NegativeSample[counter4][0] and counter2 == NegativeSample[counter4][1]:
print('fail2')
flag2 = 1
break
counter4 = counter4 + 1
if flag2 == 1:
continue
if (flag1 == 0 & flag2 == 0):
Pair = []
Pair.append(counter1)
Pair.append(counter2)
            NegativeSample.append(Pair)  # lower-triangular matrix: row index always > column index
print(counterN)
counterN = counterN + 1
print(len(NegativeSample))
StorFile(NegativeSample, 'SecondRandomShuffle\\NegativeSample.csv')
return NegativeSample
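# Minimal entry point (a sketch): the CSV paths are the hard-coded names used
# inside MyNegativeSample() above.
if __name__ == '__main__':
    MyNegativeSample()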
|
the-stack_0_9776 | # -*- coding:UTF-8 -*-
import falcon
class QuoteResource:
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {
'quote': (
"I've always been more interested in "
"the future than in the past."
),
'author': 'Grace Hopper'
}
resp.media = quote
api = falcon.API()
api.add_route('/quote', QuoteResource())
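# Quick smoke test using falcon's bundled test client (a sketch; running this
# module directly exercises the route without starting a WSGI server):
if __name__ == '__main__':
    from falcon import testing
    client = testing.TestClient(api)
    print(client.simulate_get('/quote').json)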
|
the-stack_0_9777 |
def test():
test_instructions = """0
3
0
1
-3"""
assert run(test_instructions) == 10
def run(in_val):
instructions = [int(instruction) for instruction in in_val.split()]
offsets = {}
register = 0
steps = 0
while True:
try:
instruction = instructions[register]
except IndexError:
return steps
relative_offset = offsets.get(register, 0)
offset = instruction + relative_offset
if offset >= 3:
offsets[register] = relative_offset - 1
else:
offsets[register] = relative_offset + 1
register += offset
steps += 1
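# Entry-point sketch: "input.txt" is an assumed filename for the puzzle input.
if __name__ == "__main__":
    test()
    with open("input.txt") as f:
        print(run(f.read()))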
|
the-stack_0_9778 | # -*- coding: utf-8 -*-
#
# VEGASSceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.hlinke.de/ ]
# [ Github: coming soon ]
# [ Documentation: coming soon ]
#
# Copyright (C) 2019 Harold Linke <http://www.hlinke.de>.
# VEGASSceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file
#
# VEGASSceneDetect is based on pySceneDetect by Brandon Castellano
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/pyscenedetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2012-2018 Brandon Castellano <http://www.bcastell.com>.
#
# PySceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file, or visit one of the following pages for details:
# - https://github.com/Breakthrough/PySceneDetect/
# - http://www.bcastell.com/projects/pyscenedetect/
#
# This software uses the Numpy, OpenCV, click, tqdm, and pytest libraries.
# See the included LICENSE files or one of the above URLs for more information.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
""" PySceneDetect config.py
This file reads configuration parameters for VEGASSceneDetect.
"""
import json
import os
class SD_Config():
""" Configuration of VEGASSCendetector """
def __init__(self):
# type:
""" SDConfig Constructor Method (__init__)
Arguments:
None
Raises:
None
"""
#**VEGASPython**
filedir = os.path.dirname(os.path.realpath(__file__))
filepath = os.path.join(filedir, 'config.json')
with open(filepath, "r") as read_file:
data = json.load(read_file)
        self.useHSV = False  # define if HSV or BGR should be used for content analysis - BGR is faster
        self.showPreview = True  # defines whether a preview of the analysed video should be shown
        self.PreviewFrameSkip = 100  # number of frames skipped before the preview is updated - lower numbers make the preview smoother but cost processing time
        self.showFrameValues = False  # the values calculated for each frame are shown - can be used to check the threshold for a cut
        self.threshold = 30
        self.min_scene_len = 15
try:
if "useHSV" in data:
self.useHSV = data["useHSV"] # define if HSV or BGR should be used for content analysis - BGR is faster
if "showPreview" in data:
self.showPreview = data["showPreview"] # defines, that the preview if the analysed video shoul dbe shown
if "PreviewFrameSkip" in data:
self.PreviewFrameSkip = data["PreviewFrameSkip"] # defines the number of frames skipped before the preview is updated - lower numbers make the preview smoother but cost processing time
if "showFrameValues" in data:
self.showFrameValues = data["showFrameValues"] # the values calculated for each frame are shown - can be used to chek the threshold for a cut
if "threshold" in data:
self.threshold = data["threshold"] # threshold that needs to be exceeded to determine a cut
if "min_scene_len" in data:
self.min_scene_len = data["min_scene_len"]
if "print_parameters" in data:
print("Parameters: useHSV:",self.useHSV, " showPreview:", self.showPreview, " PreviewFrameSkip:", self.PreviewFrameSkip, " showFrameValues:", self.showFrameValues, " Threshold:",self.threshold)
except:
print ("Error in Config File")
print(data)
print("useHSV:",self.useHSV, " showPreview:", self.showPreview, " PreviewFrameSkip:", self.PreviewFrameSkip, " showFrameValues:", self.showFrameValues, " Threshold:",self.threshold)
#**/VEGASPython**
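# Illustrative config.json contents and usage. The keys mirror the ones read in
# __init__ above; the values shown are the in-code defaults, not a shipped file:
#   {"useHSV": false, "showPreview": true, "PreviewFrameSkip": 100,
#    "showFrameValues": false, "threshold": 30, "min_scene_len": 15}
if __name__ == "__main__":
    cfg = SD_Config()
    print(cfg.threshold, cfg.min_scene_len)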
|
the-stack_0_9779 | from PIL import Image
import tqdm
from itertools import compress
import os
import multiprocessing
def is_corrupted_img(file):
try:
img = Image.open(file)
img.verify()
return img is None
except:
return True
def read_files(path, exts):
files = []
for r, d, f in os.walk(path):
for file in f:
if file.lower().endswith(exts):
file = os.path.join(r, file)
file = os.path.abspath(file)
file = file.replace(os.sep, "/")
files.append(file)
return files
def search_corrputed_imgs(path,
exts=("jpg",
"png",
"jpeg",
"bmp",
"tif",
"tiff")
):
exts = tuple(exts)
imgs = read_files(path, exts)
corrupted_imgs = []
if len(imgs) > 0:
with multiprocessing.Pool() as p:
is_corrupted = list(tqdm.tqdm(p.imap(is_corrupted_img, imgs), total=len(imgs)))
corrupted_imgs = list(compress(imgs, is_corrupted))
return corrupted_imgs
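# Example invocation (a sketch; "./images" is an assumed directory). The
# __main__ guard also keeps multiprocessing.Pool happy on spawn-based platforms.
if __name__ == "__main__":
    for bad in search_corrputed_imgs("./images"):
        print(bad)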
|
the-stack_0_9780 | #!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import sys
sys.path.insert(0, '..')
import test_harness
HOST_EXE_FILE = os.path.join(test_harness.WORK_DIR, 'a.out')
def run_compiler_test(source_file, target):
if target == 'host':
subprocess.check_call(['cc', source_file, '-o', HOST_EXE_FILE],
stderr=subprocess.STDOUT)
result = subprocess.check_output(HOST_EXE_FILE)
test_harness.check_result(source_file, result.decode())
else:
hex_file = test_harness.build_program([source_file])
result = test_harness.run_program(hex_file, target)
test_harness.check_result(source_file, result)
test_list = [fname for fname in test_harness.find_files(
('.c', '.cpp')) if not fname.startswith('_')]
all_targets = [fname for fname in test_list if 'noverilator' not in fname]
test_harness.register_tests(run_compiler_test, all_targets, [
'emulator', 'verilator', 'host', 'fpga'])
noverilator_targets = [fname for fname in test_list if 'noverilator' in fname]
test_harness.register_tests(
run_compiler_test, noverilator_targets, ['emulator', 'host', 'fpga'])
test_harness.execute_tests()
|
the-stack_0_9783 | from datetime import datetime
import numpy as np
import copy
import logging
import math
import os
import pickle
import time
import tempfile
from typing import Callable, Dict, List, Optional, Type, Union
import ray
from ray.exceptions import RayError
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.normalize_actions import NormalizeActionWrapper
from ray.rllib.env.env_context import EnvContext
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils import FilterManager, deep_update, merge_dicts
from ray.rllib.utils.spaces import space_utils
from ray.rllib.utils.framework import try_import_tf, TensorStructType
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.typing import TrainerConfigDict, \
PartialTrainerConfigDict, EnvInfoDict, ResultDict, EnvType, PolicyID
from ray.tune.registry import ENV_CREATOR, register_env, _global_registry
from ray.tune.trainable import Trainable
from ray.tune.trial import ExportFormat
from ray.tune.resources import Resources
from ray.tune.logger import Logger, UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# Max number of times to retry a worker failure. We shouldn't try too many
# times in a row since that would indicate a persistent cluster issue.
MAX_WORKER_FAILURE_RETRIES = 3
# yapf: disable
# __sphinx_doc_begin__
COMMON_CONFIG: TrainerConfigDict = {
# === Settings for Rollout Worker processes ===
# Number of rollout worker actors to create for parallel sampling. Setting
# this to 0 will force rollouts to be done in the trainer actor.
"num_workers": 2,
# Number of environments to evaluate vectorwise per worker. This enables
# model inference batching, which can improve performance for inference
# bottlenecked workloads.
"num_envs_per_worker": 1,
# Divide episodes into fragments of this many steps each during rollouts.
# Sample batches of this size are collected from rollout workers and
# combined into a larger batch of `train_batch_size` for learning.
#
# For example, given rollout_fragment_length=100 and train_batch_size=1000:
# 1. RLlib collects 10 fragments of 100 steps each from rollout workers.
# 2. These fragments are concatenated and we perform an epoch of SGD.
#
# When using multiple envs per worker, the fragment size is multiplied by
# `num_envs_per_worker`. This is since we are collecting steps from
# multiple envs in parallel. For example, if num_envs_per_worker=5, then
# rollout workers will return experiences in chunks of 5*100 = 500 steps.
#
# The dataflow here can vary per algorithm. For example, PPO further
# divides the train batch into minibatches for multi-epoch SGD.
"rollout_fragment_length": 200,
# Whether to rollout "complete_episodes" or "truncate_episodes" to
# `rollout_fragment_length` length unrolls. Episode truncation guarantees
# evenly sized batches, but increases variance as the reward-to-go will
# need to be estimated at truncation boundaries.
"batch_mode": "truncate_episodes",
# === Settings for the Trainer process ===
# Number of GPUs to allocate to the trainer process. Note that not all
# algorithms can take advantage of trainer GPUs. This can be fractional
# (e.g., 0.3 GPUs).
"num_gpus": 0,
# Training batch size, if applicable. Should be >= rollout_fragment_length.
# Samples batches will be concatenated together to a batch of this size,
# which is then passed to SGD.
"train_batch_size": 200,
# Arguments to pass to the policy model. See models/catalog.py for a full
# list of the available model options.
"model": MODEL_DEFAULTS,
# Arguments to pass to the policy optimizer. These vary by optimizer.
"optimizer": {},
# === Environment Settings ===
# Discount factor of the MDP.
"gamma": 0.99,
# Number of steps after which the episode is forced to terminate. Defaults
# to `env.spec.max_episode_steps` (if present) for Gym envs.
"horizon": None,
# Calculate rewards but don't reset the environment when the horizon is
# hit. This allows value estimation and RNN state to span across logical
# episodes denoted by horizon. This only has an effect if horizon != inf.
"soft_horizon": False,
# Don't set 'done' at the end of the episode. Note that you still need to
# set this if soft_horizon=True, unless your env is actually running
# forever without returning done=True.
"no_done_at_end": False,
# Arguments to pass to the env creator.
"env_config": {},
# Environment name can also be passed via config.
"env": None,
# Unsquash actions to the upper and lower bounds of env's action space
"normalize_actions": False,
# Whether to clip rewards during Policy's postprocessing.
# None (default): Clip for Atari only (r=sign(r)).
# True: r=sign(r): Fixed rewards -1.0, 1.0, or 0.0.
# False: Never clip.
# [float value]: Clip at -value and + value.
# Tuple[value1, value2]: Clip at value1 and value2.
"clip_rewards": None,
# Whether to clip actions to the action space's low/high range spec.
"clip_actions": True,
# Whether to use "rllib" or "deepmind" preprocessors by default
"preprocessor_pref": "deepmind",
# The default learning rate.
"lr": 0.0001,
# === Debug Settings ===
# Whether to write episode stats and videos to the agent log dir. This is
# typically located in ~/ray_results.
"monitor": False,
# Set the ray.rllib.* log level for the agent process and its workers.
# Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG level will also
# periodically print out summaries of relevant internal dataflow (this is
# also printed out once at startup at the INFO level). When using the
# `rllib train` command, you can also use the `-v` and `-vv` flags as
# shorthand for INFO and DEBUG.
"log_level": "WARN",
# Callbacks that will be run during various phases of training. See the
# `DefaultCallbacks` class and `examples/custom_metrics_and_callbacks.py`
# for more usage information.
"callbacks": DefaultCallbacks,
# Whether to attempt to continue training if a worker crashes. The number
# of currently healthy workers is reported as the "num_healthy_workers"
# metric.
"ignore_worker_failures": False,
# Log system resource metrics to results. This requires `psutil` to be
# installed for sys stats, and `gputil` for GPU metrics.
"log_sys_usage": True,
# Use fake (infinite speed) sampler. For testing only.
"fake_sampler": False,
# === Deep Learning Framework Settings ===
# tf: TensorFlow
# tfe: TensorFlow eager
# torch: PyTorch
"framework": "tf",
# Enable tracing in eager mode. This greatly improves performance, but
# makes it slightly harder to debug since Python code won't be evaluated
# after the initial eager pass. Only possible if framework=tfe.
"eager_tracing": False,
# === Exploration Settings ===
# Default exploration behavior, iff `explore`=None is passed into
# compute_action(s).
# Set to False for no exploration behavior (e.g., for evaluation).
"explore": True,
# Provide a dict specifying the Exploration object's config.
"exploration_config": {
# The Exploration class to use. In the simplest case, this is the name
# (str) of any class present in the `rllib.utils.exploration` package.
# You can also provide the python class directly or the full location
# of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy.
# EpsilonGreedy").
"type": "StochasticSampling",
# Add constructor kwargs here (if any).
},
# === Evaluation Settings ===
# Evaluate with every `evaluation_interval` training iterations.
# The evaluation stats will be reported under the "evaluation" metric key.
# Note that evaluation is currently not parallelized, and that for Ape-X
# metrics are already only reported for the lowest epsilon workers.
"evaluation_interval": None,
# Number of episodes to run per evaluation period. If using multiple
# evaluation workers, we will run at least this many episodes total.
"evaluation_num_episodes": 10,
# Internal flag that is set to True for evaluation workers.
"in_evaluation": False,
# Typical usage is to pass extra args to evaluation env creator
# and to disable exploration by computing deterministic actions.
# IMPORTANT NOTE: Policy gradient algorithms are able to find the optimal
# policy, even if this is a stochastic one. Setting "explore=False" here
# will result in the evaluation workers not using this optimal policy!
"evaluation_config": {
# Example: overriding env_config, exploration, etc:
# "env_config": {...},
# "explore": False
},
# Number of parallel workers to use for evaluation. Note that this is set
# to zero by default, which means evaluation will be run in the trainer
# process. If you increase this, it will increase the Ray resource usage
# of the trainer since evaluation workers are created separately from
# rollout workers.
"evaluation_num_workers": 0,
# Customize the evaluation method. This must be a function of signature
# (trainer: Trainer, eval_workers: WorkerSet) -> metrics: dict. See the
# Trainer._evaluate() method to see the default implementation. The
# trainer guarantees all eval workers have the latest policy state before
# this function is called.
"custom_eval_function": None,
# === Advanced Rollout Settings ===
# Use a background thread for sampling (slightly off-policy, usually not
# advisable to turn on unless your env specifically requires it).
"sample_async": False,
# Experimental flag to speed up sampling and use "trajectory views" as
# generic ModelV2 `input_dicts` that can be requested by the model to
# contain different information on the ongoing episode.
# NOTE: Only supported for PyTorch so far.
"_use_trajectory_view_api": False,
# Element-wise observation filter, either "NoFilter" or "MeanStdFilter".
"observation_filter": "NoFilter",
# Whether to synchronize the statistics of remote filters.
"synchronize_filters": True,
# Configures TF for single-process operation by default.
"tf_session_args": {
# note: overridden by `local_tf_session_args`
"intra_op_parallelism_threads": 2,
"inter_op_parallelism_threads": 2,
"gpu_options": {
"allow_growth": True,
},
"log_device_placement": False,
"device_count": {
"CPU": 1
},
"allow_soft_placement": True, # required by PPO multi-gpu
},
# Override the following tf session args on the local worker
"local_tf_session_args": {
# Allow a higher level of parallelism by default, but not unlimited
# since that can cause crashes with many concurrent drivers.
"intra_op_parallelism_threads": 8,
"inter_op_parallelism_threads": 8,
},
# Whether to LZ4 compress individual observations
"compress_observations": False,
# Wait for metric batches for at most this many seconds. Those that
# have not returned in time will be collected in the next train iteration.
"collect_metrics_timeout": 180,
# Smooth metrics over this many episodes.
"metrics_smoothing_episodes": 100,
# If using num_envs_per_worker > 1, whether to create those new envs in
# remote processes instead of in the same worker. This adds overheads, but
# can make sense if your envs can take much time to step / reset
# (e.g., for StarCraft). Use this cautiously; overheads are significant.
"remote_worker_envs": False,
# Timeout that remote workers are waiting when polling environments.
# 0 (continue when at least one env is ready) is a reasonable default,
# but optimal value could be obtained by measuring your environment
# step / reset and model inference perf.
"remote_env_batch_wait_ms": 0,
# Minimum time per train iteration (frequency of metrics reporting).
"min_iter_time_s": 0,
# Minimum env steps to optimize for per train call. This value does
# not affect learning, only the length of train iterations.
"timesteps_per_iteration": 0,
# This argument, in conjunction with worker_index, sets the random seed of
# each worker, so that identically configured trials will have identical
# results. This makes experiments reproducible.
"seed": None,
# Any extra python env vars to set in the trainer process, e.g.,
# {"OMP_NUM_THREADS": "16"}
"extra_python_environs_for_driver": {},
# The extra python environments need to set for worker processes.
"extra_python_environs_for_worker": {},
# === Advanced Resource Settings ===
# Number of CPUs to allocate per worker.
"num_cpus_per_worker": 1,
# Number of GPUs to allocate per worker. This can be fractional. This is
# usually needed only if your env itself requires a GPU (i.e., it is a
# GPU-intensive video game), or model inference is unusually expensive.
"num_gpus_per_worker": 0,
# Any custom Ray resources to allocate per worker.
"custom_resources_per_worker": {},
# Number of CPUs to allocate for the trainer. Note: this only takes effect
# when running in Tune. Otherwise, the trainer runs in the main program.
"num_cpus_for_driver": 1,
# You can set these memory quotas to tell Ray to reserve memory for your
# training run. This guarantees predictable execution, but the tradeoff is
    # if your workload exceeds the memory quota it will fail.
# Heap memory to reserve for the trainer process (0 for unlimited). This
# can be large if your are using large train batches, replay buffers, etc.
"memory": 0,
# Object store memory to reserve for the trainer process. Being large
# enough to fit a few copies of the model weights should be sufficient.
# This is enabled by default since models are typically quite small.
"object_store_memory": 0,
# Heap memory to reserve for each worker. Should generally be small unless
# your environment is very heavyweight.
"memory_per_worker": 0,
# Object store memory to reserve for each worker. This only needs to be
# large enough to fit a few sample batches at a time. This is enabled
# by default since it almost never needs to be larger than ~200MB.
"object_store_memory_per_worker": 0,
# === Offline Datasets ===
# Specify how to generate experiences:
# - "sampler": generate experiences via online simulation (default)
# - a local directory or file glob expression (e.g., "/tmp/*.json")
# - a list of individual file paths/URIs (e.g., ["/tmp/1.json",
# "s3://bucket/2.json"])
# - a dict with string keys and sampling probabilities as values (e.g.,
# {"sampler": 0.4, "/tmp/*.json": 0.4, "s3://bucket/expert.json": 0.2}).
# - a function that returns a rllib.offline.InputReader
"input": "sampler",
# Specify how to evaluate the current policy. This only has an effect when
# reading offline experiences. Available options:
# - "wis": the weighted step-wise importance sampling estimator.
# - "is": the step-wise importance sampling estimator.
# - "simulation": run the environment in the background, but use
# this data for evaluation only and not for learning.
"input_evaluation": ["is", "wis"],
# Whether to run postprocess_trajectory() on the trajectory fragments from
# offline inputs. Note that postprocessing will be done using the *current*
# policy, not the *behavior* policy, which is typically undesirable for
# on-policy algorithms.
"postprocess_inputs": False,
# If positive, input batches will be shuffled via a sliding window buffer
# of this number of batches. Use this if the input data is not in random
# enough order. Input is delayed until the shuffle buffer is filled.
"shuffle_buffer_size": 0,
# Specify where experiences should be saved:
# - None: don't save any experiences
# - "logdir" to save to the agent log dir
# - a path/URI to save to a custom output directory (e.g., "s3://bucket/")
# - a function that returns a rllib.offline.OutputWriter
"output": None,
# What sample batch columns to LZ4 compress in the output data.
"output_compress_columns": ["obs", "new_obs"],
# Max output file size before rolling over to a new file.
"output_max_file_size": 64 * 1024 * 1024,
# === Settings for Multi-Agent Environments ===
"multiagent": {
# Map of type MultiAgentPolicyConfigDict from policy ids to tuples
# of (policy_cls, obs_space, act_space, config). This defines the
# observation and action spaces of the policies and any extra config.
"policies": {},
# Function mapping agent ids to policy ids.
"policy_mapping_fn": None,
# Optional list of policies to train, or None for all policies.
"policies_to_train": None,
# Optional function that can be used to enhance the local agent
# observations to include more state.
# See rllib/evaluation/observation_function.py for more info.
"observation_fn": None,
# When replay_mode=lockstep, RLlib will replay all the agent
# transitions at a particular timestep together in a batch. This allows
# the policy to implement differentiable shared computations between
# agents it controls at that timestep. When replay_mode=independent,
# transitions are replayed independently per policy.
"replay_mode": "independent",
},
# === Logger ===
# Define logger-specific configuration to be used inside Logger
# Default value None allows overwriting with nested dicts
"logger_config": None,
# === Replay Settings ===
# The number of contiguous environment steps to replay at once. This may
# be set to greater than 1 to support recurrent models.
"replay_sequence_length": 1,
}
# __sphinx_doc_end__
# yapf: enable
@DeveloperAPI
def with_common_config(
extra_config: PartialTrainerConfigDict) -> TrainerConfigDict:
"""Returns the given config dict merged with common agent confs.
Args:
extra_config (PartialTrainerConfigDict): A user defined partial config
which will get merged with COMMON_CONFIG and returned.
Returns:
TrainerConfigDict: The merged config dict resulting of COMMON_CONFIG
plus `extra_config`.
"""
return Trainer.merge_trainer_configs(
COMMON_CONFIG, extra_config, _allow_unknown_configs=True)
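# Typical call site in an algorithm module (illustrative values only):
#   DEFAULT_CONFIG = with_common_config({"lr": 5e-4, "num_workers": 4})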
@PublicAPI
class Trainer(Trainable):
"""A trainer coordinates the optimization of one or more RL policies.
All RLlib trainers extend this base class, e.g., the A3CTrainer implements
the A3C algorithm for single and multi-agent training.
Trainer objects retain internal model state between calls to train(), so
you should create a new trainer instance for each training session.
Attributes:
env_creator (func): Function that creates a new training env.
config (obj): Algorithm-specific configuration data.
logdir (str): Directory in which training outputs should be placed.
"""
# Whether to allow unknown top-level config keys.
_allow_unknown_configs = False
# List of top-level keys with value=dict, for which new sub-keys are
# allowed to be added to the value dict.
_allow_unknown_subkeys = [
"tf_session_args", "local_tf_session_args", "env_config", "model",
"optimizer", "multiagent", "custom_resources_per_worker",
"evaluation_config", "exploration_config",
"extra_python_environs_for_driver", "extra_python_environs_for_worker"
]
# List of top level keys with value=dict, for which we always override the
# entire value (dict), iff the "type" key in that value dict changes.
_override_all_subkeys_if_type_changes = ["exploration_config"]
@PublicAPI
def __init__(self,
config: TrainerConfigDict = None,
env: str = None,
logger_creator: Callable[[], Logger] = None):
"""Initialize an RLLib trainer.
Args:
config (dict): Algorithm-specific configuration data.
env (str): Name of the environment to use. Note that this can also
be specified as the `env` key in config.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
# User provided config (this is w/o the default Trainer's
# `COMMON_CONFIG` (see above)). Will get merged with COMMON_CONFIG
# in self.setup().
config = config or {}
# Trainers allow env ids to be passed directly to the constructor.
self._env_id = self._register_if_needed(env or config.get("env"))
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(self._name, self._env_id,
timestr)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
super().__init__(config, logger_creator)
@classmethod
@override(Trainable)
def default_resource_request(
cls, config: PartialTrainerConfigDict) -> Resources:
cf = dict(cls._default_config, **config)
Trainer._validate_config(cf)
num_workers = cf["num_workers"] + cf["evaluation_num_workers"]
# TODO(ekl): add custom resources here once tune supports them
return Resources(
cpu=cf["num_cpus_for_driver"],
gpu=cf["num_gpus"],
memory=cf["memory"],
object_store_memory=cf["object_store_memory"],
extra_cpu=cf["num_cpus_per_worker"] * num_workers,
extra_gpu=cf["num_gpus_per_worker"] * num_workers,
extra_memory=cf["memory_per_worker"] * num_workers,
extra_object_store_memory=cf["object_store_memory_per_worker"] *
num_workers)
@override(Trainable)
@PublicAPI
def train(self) -> ResultDict:
"""Overrides super.train to synchronize global vars."""
result = None
for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):
try:
result = Trainable.train(self)
except RayError as e:
if self.config["ignore_worker_failures"]:
logger.exception(
"Error in train call, attempting to recover")
self._try_recover()
else:
logger.info(
"Worker crashed during call to train(). To attempt to "
"continue training without the failed worker, set "
"`'ignore_worker_failures': True`.")
raise e
except Exception as e:
time.sleep(0.5) # allow logs messages to propagate
raise e
else:
break
if result is None:
raise RuntimeError("Failed to recover from worker crash")
if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
self._sync_filters_if_needed(self.workers)
if self.config["evaluation_interval"] == 1 or (
self._iteration > 0 and self.config["evaluation_interval"]
and self._iteration % self.config["evaluation_interval"] == 0):
evaluation_metrics = self._evaluate()
assert isinstance(evaluation_metrics, dict), \
"_evaluate() needs to return a dict."
result.update(evaluation_metrics)
return result
def _sync_filters_if_needed(self, workers: WorkerSet):
if self.config.get("observation_filter", "NoFilter") != "NoFilter":
FilterManager.synchronize(
workers.local_worker().filters,
workers.remote_workers(),
update_remote=self.config["synchronize_filters"])
logger.debug("synchronized filters: {}".format(
workers.local_worker().filters))
@override(Trainable)
def log_result(self, result: ResultDict):
self.callbacks.on_train_result(trainer=self, result=result)
# log after the callback is invoked, so that the user has a chance
# to mutate the result
Trainable.log_result(self, result)
@override(Trainable)
def setup(self, config: PartialTrainerConfigDict):
env = self._env_id
if env:
config["env"] = env
# An already registered env.
if _global_registry.contains(ENV_CREATOR, env):
self.env_creator = _global_registry.get(ENV_CREATOR, env)
# A class specifier.
elif "." in env:
self.env_creator = \
lambda env_config: from_config(env, env_config)
# Try gym.
else:
import gym # soft dependency
self.env_creator = \
lambda env_config: gym.make(env, **env_config)
else:
self.env_creator = lambda env_config: None
# Merge the supplied config with the class default, but store the
# user-provided one.
self.raw_user_config = config
self.config = Trainer.merge_trainer_configs(self._default_config,
config)
# Check and resolve DL framework settings.
# Enable eager/tracing support.
if tf1 and self.config["framework"] in ["tf2", "tfe"]:
if self.config["framework"] == "tf2" and tfv < 2:
raise ValueError("`framework`=tf2, but tf-version is < 2.0!")
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
logger.info("Executing eagerly, with eager_tracing={}".format(
self.config["eager_tracing"]))
if tf1 and not tf1.executing_eagerly() and \
self.config["framework"] != "torch":
logger.info("Tip: set framework=tfe or the --eager flag to enable "
"TensorFlow eager execution")
if self.config["normalize_actions"]:
inner = self.env_creator
def normalize(env):
import gym # soft dependency
if not isinstance(env, gym.Env):
                    raise ValueError(
                        "Cannot apply NormalizeActionWrapper to env of "
                        "type {}, which does not subclass gym.Env.".format(
                            type(env)))
return NormalizeActionWrapper(env)
self.env_creator = lambda env_config: normalize(inner(env_config))
Trainer._validate_config(self.config)
if not callable(self.config["callbacks"]):
raise ValueError(
"`callbacks` must be a callable method that "
"returns a subclass of DefaultCallbacks, got {}".format(
self.config["callbacks"]))
self.callbacks = self.config["callbacks"]()
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info("Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level))
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
def get_scope():
if tf1 and not tf1.executing_eagerly():
return tf1.Graph().as_default()
else:
return open(os.devnull) # fake a no-op scope
with get_scope():
self._init(self.config, self.env_creator)
# Evaluation setup.
if self.config.get("evaluation_interval"):
# Update env_config with evaluation settings:
extra_config = copy.deepcopy(self.config["evaluation_config"])
# Assert that user has not unset "in_evaluation".
assert "in_evaluation" not in extra_config or \
extra_config["in_evaluation"] is True
extra_config.update({
"batch_mode": "complete_episodes",
"rollout_fragment_length": 1,
"in_evaluation": True,
})
logger.debug(
"using evaluation_config: {}".format(extra_config))
self.evaluation_workers = self._make_workers(
env_creator=self.env_creator,
validate_env=None,
policy_class=self._policy_class,
config=merge_dicts(self.config, extra_config),
num_workers=self.config["evaluation_num_workers"])
self.evaluation_metrics = {}
@override(Trainable)
def cleanup(self):
if hasattr(self, "workers"):
self.workers.stop()
if hasattr(self, "optimizer") and self.optimizer:
self.optimizer.stop()
@override(Trainable)
def save_checkpoint(self, checkpoint_dir: str) -> str:
checkpoint_path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(self.iteration))
pickle.dump(self.__getstate__(), open(checkpoint_path, "wb"))
return checkpoint_path
@override(Trainable)
def load_checkpoint(self, checkpoint_path: str):
extra_data = pickle.load(open(checkpoint_path, "rb"))
self.__setstate__(extra_data)
@DeveloperAPI
def _make_workers(
self, *, env_creator: Callable[[EnvContext], EnvType],
validate_env: Optional[Callable[[EnvType, EnvContext], None]],
policy_class: Type[Policy], config: TrainerConfigDict,
num_workers: int) -> WorkerSet:
"""Default factory method for a WorkerSet running under this Trainer.
Override this method by passing a custom `make_workers` into
`build_trainer`.
Args:
env_creator (callable): A function that return and Env given an env
config.
validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
Optional callable to validate the generated environment (only
on worker=0).
policy (Type[Policy]): The Policy class to use for creating the
policies of the workers.
config (TrainerConfigDict): The Trainer's config.
num_workers (int): Number of remote rollout workers to create.
0 for local only.
Returns:
WorkerSet: The created WorkerSet.
"""
return WorkerSet(
env_creator=env_creator,
validate_env=validate_env,
policy_class=policy_class,
trainer_config=config,
num_workers=num_workers,
logdir=self.logdir)
@DeveloperAPI
def _init(self, config: TrainerConfigDict,
env_creator: Callable[[EnvContext], EnvType]):
"""Subclasses should override this for custom initialization."""
raise NotImplementedError
@DeveloperAPI
def _evaluate(self) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Note that this default implementation does not do anything beyond
merging evaluation_config with the normal trainer config.
"""
self._before_evaluate()
# Broadcast the new policy weights to all evaluation workers.
logger.info("Synchronizing weights to evaluation workers.")
weights = ray.put(self.workers.local_worker().save())
self.evaluation_workers.foreach_worker(
lambda w: w.restore(ray.get(weights)))
self._sync_filters_if_needed(self.evaluation_workers)
if self.config["custom_eval_function"]:
logger.info("Running custom eval function {}".format(
self.config["custom_eval_function"]))
metrics = self.config["custom_eval_function"](
self, self.evaluation_workers)
if not metrics or not isinstance(metrics, dict):
raise ValueError("Custom eval function must return "
"dict of metrics, got {}.".format(metrics))
else:
logger.info("Evaluating current policy for {} episodes.".format(
self.config["evaluation_num_episodes"]))
if self.config["evaluation_num_workers"] == 0:
for _ in range(self.config["evaluation_num_episodes"]):
self.evaluation_workers.local_worker().sample()
else:
num_rounds = int(
math.ceil(self.config["evaluation_num_episodes"] /
self.config["evaluation_num_workers"]))
num_workers = len(self.evaluation_workers.remote_workers())
num_episodes = num_rounds * num_workers
for i in range(num_rounds):
logger.info("Running round {} of parallel evaluation "
"({}/{} episodes)".format(
i, (i + 1) * num_workers, num_episodes))
ray.get([
w.sample.remote()
for w in self.evaluation_workers.remote_workers()
])
metrics = collect_metrics(self.evaluation_workers.local_worker(),
self.evaluation_workers.remote_workers())
return {"evaluation": metrics}
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@PublicAPI
def compute_action(self,
observation: TensorStructType,
state: List[TensorStructType] = None,
prev_action: TensorStructType = None,
prev_reward: float = None,
info: EnvInfoDict = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: bool = None) -> TensorStructType:
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
observation (TensorStructType): observation from the environment.
state (List[TensorStructType]): RNN hidden state, if any. If state
is not None, then all of compute_single_action(...) is returned
(computed action, rnn state(s), logits dictionary).
Otherwise compute_single_action(...)[0] is returned
(computed action).
prev_action (TensorStructType): Previous action value, if any.
prev_reward (float): Previous reward, if any.
info (EnvInfoDict): info object, if any
policy_id (PolicyID): Policy to query (only applies to
multi-agent).
full_fetch (bool): Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore (bool): Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
Returns:
any: The computed action if full_fetch=False, or
tuple: The full output of policy.compute_actions() if
full_fetch=True or we have an RNN-based Policy.
"""
if state is None:
state = []
preprocessed = self.workers.local_worker().preprocessors[
policy_id].transform(observation)
filtered_obs = self.workers.local_worker().filters[policy_id](
preprocessed, update=False)
result = self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore)
if state or full_fetch:
return result
else:
return result[0] # backwards compatibility
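    # Illustrative call pattern (a sketch; the trainer subclass, env id and
    # observation value are assumptions):
    #   trainer = PPOTrainer(config={"num_workers": 0}, env="CartPole-v0")
    #   action = trainer.compute_action(obs)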
def compute_actions(self,
observations,
state=None,
prev_action=None,
prev_reward=None,
info=None,
policy_id=DEFAULT_POLICY_ID,
full_fetch=False,
explore=None):
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
            observations (dict): Observations from the environment, keyed by agent id.
state (dict): RNN hidden state, if any. If state is not None,
then all of compute_single_action(...) is returned
(computed action, rnn state(s), logits dictionary).
Otherwise compute_single_action(...)[0] is returned
(computed action).
prev_action (obj): previous action value, if any
prev_reward (int): previous reward, if any
info (dict): info object, if any
policy_id (str): Policy to query (only applies to multi-agent).
full_fetch (bool): Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore (bool): Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
Returns:
any: The computed action if full_fetch=False, or
tuple: The full output of policy.compute_actions() if
full_fetch=True or we have an RNN-based Policy.
"""
# Preprocess obs and states
stateDefined = state is not None
policy = self.get_policy(policy_id)
filtered_obs, filtered_state = [], []
for agent_id, ob in observations.items():
worker = self.workers.local_worker()
preprocessed = worker.preprocessors[policy_id].transform(ob)
filtered = worker.filters[policy_id](preprocessed, update=False)
filtered_obs.append(filtered)
if state is None:
continue
elif agent_id in state:
filtered_state.append(state[agent_id])
else:
filtered_state.append(policy.get_initial_state())
# Batch obs and states
obs_batch = np.stack(filtered_obs)
if state is None:
state = []
else:
state = list(zip(*filtered_state))
state = [np.stack(s) for s in state]
# Batch compute actions
actions, states, infos = policy.compute_actions(
obs_batch,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore)
# Unbatch actions for the environment
atns, actions = space_utils.unbatch(actions), {}
for key, atn in zip(observations, atns):
actions[key] = atn
# Unbatch states into a dict
unbatched_states = {}
for idx, agent_id in enumerate(observations):
unbatched_states[agent_id] = [s[idx] for s in states]
# Return only actions or full tuple
if stateDefined or full_fetch:
return actions, unbatched_states, infos
else:
return actions
@property
def _name(self) -> str:
"""Subclasses should override this to declare their name."""
raise NotImplementedError
@property
def _default_config(self) -> TrainerConfigDict:
"""Subclasses should override this to declare their default config."""
raise NotImplementedError
@PublicAPI
def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:
"""Return policy for the specified id, or None.
Args:
policy_id (str): id of policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies: List[PolicyID] = None) -> dict:
"""Return a dictionary of policy ids to weights.
Args:
policies (list): Optional list of policies to return weights for,
or None for all policies.
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights: Dict[PolicyID, dict]):
"""Set policy weights by policy id.
Args:
weights (dict): Map of policy ids to weights to set.
"""
self.workers.local_worker().set_weights(weights)
@DeveloperAPI
def export_policy_model(self,
export_dir: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Export policy model with given policy_id to local directory.
Args:
export_dir (string): Writable local directory.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_model("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_model(export_dir, policy_id)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir: str,
filename_prefix: str = "model",
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Export tensorflow policy model checkpoint to local directory.
Args:
export_dir (string): Writable local directory.
filename_prefix (string): file name prefix of checkpoint files.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_checkpoint("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_checkpoint(
export_dir, filename_prefix, policy_id)
@DeveloperAPI
def import_policy_model_from_h5(self,
import_file: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Imports a policy's model with given policy_id from a local h5 file.
Args:
import_file (str): The h5 file to import from.
policy_id (string): Optional policy id to import into.
Example:
>>> trainer = MyTrainer()
>>> trainer.import_policy_model_from_h5("/tmp/weights.h5")
>>> for _ in range(10):
>>> trainer.train()
"""
self.workers.local_worker().import_policy_model_from_h5(
import_file, policy_id)
@DeveloperAPI
def collect_metrics(self,
selected_workers: List["ActorHandle"] = None) -> dict:
"""Collects metrics from the remote workers of this agent.
This is the same data as returned by a call to train().
"""
return self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"],
min_history=self.config["metrics_smoothing_episodes"],
selected_workers=selected_workers)
@classmethod
def resource_help(cls, config: TrainerConfigDict) -> str:
return ("\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config))
@classmethod
def merge_trainer_configs(cls,
config1: TrainerConfigDict,
config2: PartialTrainerConfigDict,
_allow_unknown_configs: Optional[bool] = None
) -> TrainerConfigDict:
config1 = copy.deepcopy(config1)
if "callbacks" in config2 and type(config2["callbacks"]) is dict:
legacy_callbacks_dict = config2["callbacks"]
def make_callbacks():
# Deprecation warning will be logged by DefaultCallbacks.
return DefaultCallbacks(
legacy_callbacks_dict=legacy_callbacks_dict)
config2["callbacks"] = make_callbacks
if _allow_unknown_configs is None:
_allow_unknown_configs = cls._allow_unknown_configs
return deep_update(config1, config2, _allow_unknown_configs,
cls._allow_unknown_subkeys,
cls._override_all_subkeys_if_type_changes)
@staticmethod
def _validate_config(config: PartialTrainerConfigDict):
if config.get("_use_trajectory_view_api") and \
config.get("framework") != "torch":
logger.info(
"`_use_trajectory_view_api` only supported for PyTorch so "
"far! Will run w/o.")
config["_use_trajectory_view_api"] = False
elif not config.get("_use_trajectory_view_api") and \
config.get("model", {}).get("_time_major"):
raise ValueError("`model._time_major` only supported "
"iff `_use_trajectory_view_api` is True!")
if type(config["input_evaluation"]) != list:
raise ValueError(
"`input_evaluation` must be a list of strings, got {}".format(
config["input_evaluation"]))
def _try_recover(self):
"""Try to identify and remove any unhealthy workers.
This method is called after an unexpected remote error is encountered
from a worker. It issues check requests to all current workers and
removes any that respond with error. If no healthy workers remain,
an error is raised.
"""
assert hasattr(self, "execution_plan")
workers = self.workers
logger.info("Health checking all workers...")
checks = []
for ev in workers.remote_workers():
_, obj_ref = ev.sample_with_count.remote()
checks.append(obj_ref)
healthy_workers = []
for i, obj_ref in enumerate(checks):
w = workers.remote_workers()[i]
try:
ray.get(obj_ref)
healthy_workers.append(w)
logger.info("Worker {} looks healthy".format(i + 1))
except RayError:
logger.exception("Removing unhealthy worker {}".format(i + 1))
try:
w.__ray_terminate__.remote()
except Exception:
logger.exception("Error terminating unhealthy worker")
if len(healthy_workers) < 1:
raise RuntimeError(
"Not enough healthy workers remain to continue.")
logger.warning("Recreating execution plan after failure")
workers.reset(healthy_workers)
self.train_exec_impl = self.execution_plan(workers, self.config)
@override(Trainable)
def _export_model(self, export_formats: List[str],
export_dir: str) -> Dict[str, str]:
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
return exported
def import_model(self, import_file: str):
"""Imports a model from import_file.
Note: Currently, only h5 files are supported.
Args:
import_file (str): The file to import the model from.
Returns:
            The return value of the underlying h5 policy-import call.
"""
# Check for existence.
if not os.path.exists(import_file):
raise FileNotFoundError(
"`import_file` '{}' does not exist! Can't import Model.".
format(import_file))
# Get the format of the given file.
import_format = "h5" # TODO(sven): Support checkpoint loading.
ExportFormat.validate([import_format])
if import_format != ExportFormat.H5:
raise NotImplementedError
else:
return self.import_policy_model_from_h5(import_file)
def __getstate__(self) -> dict:
state = {}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().save()
if hasattr(self, "optimizer") and hasattr(self.optimizer, "save"):
state["optimizer"] = self.optimizer.save()
return state
def __setstate__(self, state: dict):
if "worker" in state:
self.workers.local_worker().restore(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.restore.remote(remote_state)
if "optimizer" in state:
self.optimizer.restore(state["optimizer"])
@staticmethod
def with_updates(**overrides) -> Type["Trainer"]:
raise NotImplementedError(
"`with_updates` may only be called on Trainer sub-classes "
"that were generated via the `ray.rllib.agents.trainer_template."
"build_trainer()` function!")
def _register_if_needed(self, env_object: Union[str, EnvType]):
if isinstance(env_object, str):
return env_object
elif isinstance(env_object, type):
name = env_object.__name__
register_env(name, lambda config: env_object(config))
return name
raise ValueError(
"{} is an invalid env specification. ".format(env_object) +
"You can specify a custom env as either a class "
"(e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").")
|
the-stack_0_9785 | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
h_z = Hint("h_z2", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
return frozenset(res)
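# A minimal sketch of how the two entry points above would be consumed by a
# verification harness; the harness itself is not part of this file, so this helper
# is only an illustrative assumption.
def _example_usage():
    env = PysmtEnv()
    symbols, init, trans, fairness = transition_system(env)
    hint_set = hints(env)
    return symbols, init, trans, fairness, hint_set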
|
the-stack_0_9786 | import nltk
import discord
from discord.ext import commands
from nltk.sentiment import SentimentIntensityAnalyzer
import database as db
import variables as var
from functions import get_prefix
from ext.permissions import has_command_permission
nltk.download('vader_lexicon')
sia = SentimentIntensityAnalyzer()
class Karma(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
"""Simple check to see if this cog (plugin) is enabled."""
guild_doc = await db.PLUGINS.find_one({"_id": ctx.guild.id})
if guild_doc.get("Karma"):
return True
else:
await ctx.send(
embed=discord.Embed(
description=(
f"{var.E_DISABLE} The Karma plugin is"
" disabled in this server"
),
color=var.C_ORANGE
)
)
@commands.command()
@has_command_permission()
async def karma(self, ctx, karma_user: discord.User = None):
user = ctx.author if karma_user is None else karma_user
guild_col = db.KARMA_DATABASE[str(ctx.guild.id)]
userdata = await guild_col.find_one({"_id": user.id})
karmas = [
x async for x in guild_col.find(
{
"_id": {"$ne": 0},
# Removing ID 0 (Config doc, unrelated to user xp)
}
).sort("karma", -1)
]
if userdata is None:
await ctx.send("This user does not have any karma yet...")
else:
# Index starts with zero
position = karmas.index(userdata) + 1
embed = discord.Embed(
title=f"Karma for {user.name}",
color=var.C_MAIN
).add_field(
name="Karma", value=userdata["karma"]
).add_field(
name="Position", value=f"{position}/{len(karmas)}", inline=False
).set_thumbnail(url=user.avatar_url)
total_karma = sum(i["karma"] for i in karmas)
average = total_karma/len(karmas)
if userdata["karma"] > average:
embed.description = (
f"Your karma is better than the average {user.name}! :)"
)
if userdata["karma"] < average:
embed.description = (
f"Your karma is lower than the average {user.name}, "
f"is it because you don't talk much or you are not nice "
f"enough? :eyes:"
)
if position == 1:
embed.description = (
f"Woohoo {user.name}, you are the nicest "
f"person in the server!"
)
await ctx.channel.send(embed=embed)
@commands.command(name="karmaboard", aliases=["kb"])
@has_command_permission()
async def karma_board(self, ctx):
guild_col = db.KARMA_DATABASE[str(ctx.guild.id)]
karmas = [
# Removing ID 0 (Config doc, unrelated to user xp)
x async for x in
guild_col.find({"_id": {"$ne": 0}}).sort("karma", -1)
]
        # Ceiling division: the board shows 10 entries per page.
        all_pages = max(1, (len(karmas) + 9) // 10)
        total_karma = sum(i["karma"] for i in karmas)
        average = total_karma / len(karmas)
embed = discord.Embed(
title=f"Karma Board",
description=f"The average karma in this server is **{average}**",
color=var.C_BLUE
).set_thumbnail(url=ctx.guild.icon_url)
count = 0
for i in karmas:
count += 1
try:
user = self.bot.get_user(i.get("_id"))
karma = i.get("karma")
embed.add_field(
name=f"{count}: {user}",
value=f"Total Karma: {karma}",
inline=False
)
except Exception:
print(f"Not found {i}")
if count == 10:
break
embed.set_footer(text=f"Page 1/{all_pages}")
bot_msg = await ctx.send(embed=embed)
await bot_msg.add_reaction("◀️")
await bot_msg.add_reaction("⬅️")
await bot_msg.add_reaction("➡️")
await bot_msg.add_reaction("▶️")
def reaction_check(r, u):
return u == ctx.author and r.message == bot_msg
async def pagination(ctx, current_page, embed, GuildCol, all_pages):
page_rn = current_page + 1
embed.set_footer(text=f"Page {page_rn}/{all_pages}")
embed.clear_fields()
            rank_count = current_page * 10
            karmas = [
                x async for x in GuildCol.find(
                    # Removing ID 0 (Config doc, unrelated to user xp)
                    {"_id": {"$ne": 0}}
                ).sort("karma", -1).skip(current_page * 10).limit(10)
            ]
for i in karmas:
rank_count += 1
user = self.bot.get_user(i.get("_id"))
karma = i.get("karma")
embed.add_field(
name=f"{rank_count}: {user}",
value=f"Total Karma: {karma}",
inline=False
)
if rank_count == current_page *10 + 10:
break
current_page = 0
while True:
reaction, user = await self.bot.wait_for(
"reaction_add", check=reaction_check
)
if str(reaction.emoji) == "◀️":
try:
await bot_msg.remove_reaction("◀️", ctx.author)
except discord.Forbidden:
pass
current_page = 0
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
if str(reaction.emoji) == "➡️":
try:
await bot_msg.remove_reaction("➡️", ctx.author)
except discord.Forbidden:
pass
current_page += 1
                if current_page >= all_pages:
                    current_page -= 1
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
if str(reaction.emoji) == "⬅️":
try:
await bot_msg.remove_reaction("⬅️", ctx.author)
except discord.Forbidden:
pass
current_page -= 1
if current_page < 0:
current_page += 1
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
if str(reaction.emoji) == "▶️":
try:
await bot_msg.remove_reaction("▶️", ctx.author)
except discord.Forbidden:
pass
current_page = all_pages-1
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
@commands.command(name="kblacklist")
@has_command_permission()
async def k_blacklist(self, ctx, channel: discord.TextChannel = None):
if channel is not None:
guild_col = db.KARMA_DATABASE[(str(ctx.guild.id))]
settings = await guild_col.find_one({"_id": 0})
new_settings = settings.get("blacklists").copy()
if channel.id in new_settings:
await ctx.send("This channel is already blacklisted")
else:
new_settings.append(channel.id)
new_data = {
"$set": {
"blacklists": new_settings
}
}
await guild_col.update_one(settings, new_data)
await ctx.send(
embed=discord.Embed(
description=(
f"{channel.mention} has been blacklisted, "
f"hence users won't gain any karma in that channel."
),
color=var.C_GREEN
)
)
else:
await ctx.send(
embed=discord.Embed(
description=(
"🚫 You need to define the channel to blacklist it!"
),
color=var.C_RED
).add_field(
name="Format",
value=f"```{await get_prefix(ctx)}kblacklist <#channel>```"
)
)
@commands.command(name="kwhitelist")
@has_command_permission()
async def k_whitelist(self, ctx, channel: discord.TextChannel = None):
if channel is not None:
guild_col = db.KARMA_DATABASE[(str(ctx.guild.id))]
settings = await guild_col.find_one({"_id": 0})
new_settings = settings.get("blacklists").copy()
if channel.id not in new_settings:
await ctx.send("This channel is not blacklisted")
else:
new_settings.remove(channel.id)
new_data = {
"$set": {
"blacklists": new_settings
}
}
await guild_col.update_one(settings, new_data)
await ctx.send(
embed=discord.Embed(
description=(
f"{channel.mention} has been whitelisted, hence "
"users would be able to gain karma again in that "
"channel."
),
color=var.C_GREEN
)
)
else:
await ctx.send(
embed=discord.Embed(
description=(
"🚫 You need to define the channel to whitelist it!"
),
color=var.C_RED
).add_field(
name="Format",
value=f"```{await get_prefix(ctx)}kwhitelist <#channel>```"
)
)
@commands.Cog.listener()
async def on_message(self, message):
if not message.guild:
return
plugin_doc = await db.PLUGINS.find_one({"_id": message.guild.id})
guild_col = db.KARMA_DATABASE[str(message.guild.id)]
settings_doc = await guild_col.find_one({"_id": 0})
if plugin_doc["Karma"] and not message.author.bot:
            if message.channel.id not in settings_doc["blacklists"]:
userdata = await guild_col.find_one({"_id": message.author.id})
polarity = sia.polarity_scores(message.content)
result = max(polarity, key=polarity.get)
def get_karma():
if result == "neg":
return -polarity[result]
elif result == "pos":
return polarity[result]
return 0
if userdata is None:
await guild_col.insert_one(
{"_id": message.author.id, "karma": get_karma()}
)
else:
new_karma = get_karma()
new_karma += userdata["karma"]
await guild_col.update_one(
userdata, {"$set": {"karma": new_karma}}
)
def setup(bot):
bot.add_cog(Karma(bot))
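# A small illustration of the sentiment-to-karma mapping used in `on_message` above:
# whichever VADER bucket scores highest decides the sign, and its score becomes the
# magnitude of the karma delta. The default sample sentence is arbitrary.
def _example_karma_delta(text: str = "you are wonderful") -> float:
    polarity = sia.polarity_scores(text)
    result = max(polarity, key=polarity.get)
    if result == "neg":
        return -polarity[result]
    if result == "pos":
        return polarity[result]
    return 0.0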
|
the-stack_0_9787 | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("categories", [["b", "a", "c"], ["a", "b", "c", "d"]])
def test_factorize(categories, ordered):
cat = pd.Categorical(
["b", "b", "a", "c", None], categories=categories, ordered=ordered
)
codes, uniques = pd.factorize(cat)
expected_codes = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(
["b", "a", "c"], categories=categories, ordered=ordered
)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(["b", "b", None, "a"])
codes, uniques = pd.factorize(cat, sort=True)
expected_codes = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(["a", "b"])
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(
["b", "b", None, "a"], categories=["c", "b", "a"], ordered=True
)
codes, uniques = pd.factorize(cat, sort=True)
expected_codes = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(
["b", "a"], categories=["c", "b", "a"], ordered=True
)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_isin_cats():
# GH2003
cat = pd.Categorical(["a", "b", np.nan])
result = cat.isin(["a", np.nan])
expected = np.array([True, False, True], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
result = cat.isin(["a", "c"])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
expected = np.array([False, False], dtype=bool)
result = s.isin(empty)
tm.assert_numpy_array_equal(expected, result)
class TestTake:
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_warns(self):
cat = pd.Categorical(["a", "b"])
with tm.assert_produces_warning(FutureWarning):
cat.take([0, -1])
def test_take_positive_no_warning(self):
cat = pd.Categorical(["a", "b"])
with tm.assert_produces_warning(None):
cat.take([0, 0])
def test_take_bounds(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical(["a", "b", "a"])
with pytest.raises(IndexError):
cat.take([4, 5], allow_fill=allow_fill)
def test_take_empty(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical([], categories=["a", "b"])
with pytest.raises(IndexError):
cat.take([0], allow_fill=allow_fill)
def test_positional_take(self, ordered_fixture):
cat = pd.Categorical(
["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered_fixture
)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(
["a", "a", "b"], categories=cat.categories, ordered=ordered_fixture
)
tm.assert_categorical_equal(result, expected)
def test_positional_take_unobserved(self, ordered_fixture):
cat = pd.Categorical(
["a", "b"], categories=["a", "b", "c"], ordered=ordered_fixture
)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(
["b", "a"], categories=cat.categories, ordered=ordered_fixture
)
tm.assert_categorical_equal(result, expected)
def test_take_allow_fill(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "a", "b"])
result = cat.take([0, -1, -1], allow_fill=True)
expected = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_take_fill_with_negative_one(self):
# -1 was a category
cat = pd.Categorical([-1, 0, 1])
result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
expected = pd.Categorical([-1, -1, 0], categories=[-1, 0, 1])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "b", "c"])
result = cat.take([0, 1, -1], fill_value="a", allow_fill=True)
expected = pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value_new_raises(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "b", "c"])
xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
with pytest.raises(TypeError, match=xpr):
cat.take([0, 1, -1], fill_value="d", allow_fill=True)
|
the-stack_0_9788 | # ---------------------------------------------------------------------
# Eltex.MA4000.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
from noc.core.ip import IPv4
from noc.core.text import parse_table
from noc.core.validators import is_int
class Script(BaseScript):
name = "Eltex.MA4000.get_interfaces"
interface = IGetInterfaces
rx_mgmt = re.compile(
r"^\s+ip\s+(?P<ip>\S+)\s*\n"
r"^\s+mask\s+(?P<mask>\S+)\s*\n"
r"^\s+gateway.+\n"
r"^\s+vlan\s+(?P<vlan_id>\d+)\s*\n",
re.MULTILINE,
)
rx_mac = re.compile(r"^\s*\*\d\s+\S+\s+MASTER\s+\d+\s+(?P<mac>\S+)", re.MULTILINE)
def create_iface(self, i, iftype):
ifname = " ".join(i[0].split())
if not ifname.startswith(iftype):
return None
pvid = i[1]
if i[4] not in ["none", "N/S"]:
tagged = self.expand_rangelist(i[4])
else:
tagged = []
untagged = i[5] if is_int(i[5]) else pvid
iface = {
"name": ifname,
"type": "physical",
"subinterfaces": [
{"name": ifname, "enabled_afi": ["BRIDGE"], "untagged_vlan": untagged}
],
}
if tagged:
iface["subinterfaces"][0]["tagged_vlans"] = tagged
return iface
def execute(self):
interfaces = []
lldp = []
c = self.cli("show lldp configuration")
if "LLDP state: Enabled" in c:
t = parse_table(c, allow_wrap=True, footer="PD - port description")
for i in t:
ifname = " ".join(i[0].split())
if "transmit" in i[1] or "receive" in i[1]:
lldp += [ifname]
c = self.cli("show interface front-port all vlans")
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "front-port")
if iface is not None:
if iface["name"] in lldp:
iface["enabled_protocols"] = ["LLDP"]
interfaces += [iface]
for slot in range(0, 16):
c = self.cli("show interface plc-pon-port %d/0-7 vlans" % slot)
t = parse_table(c, allow_wrap=True, footer="dummy footer")
for i in t:
iface = self.create_iface(i, "plc-pon-port")
if iface is not None:
interfaces += [iface]
c = self.cli("show interface slot-channel 0-15 vlans")
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "slot-channel")
if iface is not None:
interfaces += [iface]
c = self.cli("show interface slot-port all vlans")
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "slot-port")
if iface is not None:
interfaces += [iface]
c = self.cli("show management")
match = self.rx_mgmt.search(c)
ip_address = "%s/%s" % (match.group("ip"), IPv4.netmask_to_len(match.group("mask")))
iface = {
"name": "management",
"type": "SVI",
"subinterfaces": [
{
"name": "management",
"enabled_afi": ["IPv4"],
"ipv4_addresses": [ip_address],
"vlan_ids": int(match.group("vlan_id")),
}
],
}
c = self.cli("show stack")
match = self.rx_mac.search(c)
iface["mac"] = match.group("mac")
iface["subinterfaces"][0]["mac"] = match.group("mac")
interfaces += [iface]
portchannels = self.scripts.get_portchannel()
for pc in portchannels:
c = self.cli("show interface %s vlans" % pc["interface"])
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "port-channel")
if iface is not None:
has_lacp = False
iface["type"] = "aggregated"
if pc["type"] == "L":
has_lacp = True
iface["enabled_protocols"] = ["LACP"]
interfaces += [iface]
for member in pc["members"]:
for i in interfaces:
if member == i["name"]:
i["aggregated_interface"] = pc["interface"]
if has_lacp:
if i["enabled_protocols"]:
i["enabled_protocols"] += ["LACP"]
else:
i["enabled_protocols"] = ["LACP"]
break
return [{"interfaces": interfaces}]
|
the-stack_0_9789 | from datetime import date
from mockito import *
from django.test.client import Client
from django.utils import unittest
from molly.apps.places.models import Entity, Journey
from molly.apps.places.providers.cif import CifTimetableProvider
import httplib
class AtcoCifTestCase(unittest.TestCase):
def testBankHolidays(self):
j = Journey()
# 10 bank hols in 2010
hols = j.get_bank_holidays(2010)
self.assertEquals(len(hols), 10)
self.assertTrue(date(2010, 1, 1) in hols) # New Year's Day
self.assertTrue(date(2010, 4, 2) in hols) # Good Friday
self.assertTrue(date(2010, 4, 5) in hols) # Easter Monday
self.assertTrue(date(2010, 5, 3) in hols) # Early May Bank Holiday
self.assertTrue(date(2010, 5, 31) in hols) # Spring Bank Holiday
self.assertTrue(date(2010, 8, 30) in hols) # Summer Bank Holiday
self.assertTrue(date(2010, 12, 25) in hols) # Christmas Day
self.assertTrue(date(2010, 12, 26) in hols) # Boxing Day
self.assertTrue(date(2010, 12, 27) in hols) # Christmas Day (in lieu)
self.assertTrue(date(2010, 12, 28) in hols) # Boxing Day (in lieu)
# 11 bank hols in 2011
hols = j.get_bank_holidays(2011)
self.assertEquals(len(hols), 11)
self.assertTrue(date(2011, 1, 1) in hols) # New Year's Day
self.assertTrue(date(2011, 1, 3) in hols) # New Year's Day (in lieu)
self.assertTrue(date(2011, 4, 22) in hols) # Good Friday
self.assertTrue(date(2011, 4, 25) in hols) # Easter Monday
self.assertTrue(date(2011, 4, 29) in hols) # Royal Wedding
self.assertTrue(date(2011, 5, 2) in hols) # Early May Bank Holiday
self.assertTrue(date(2011, 5, 30) in hols) # Spring Bank Holiday
self.assertTrue(date(2011, 8, 29) in hols) # Summer Bank Holiday
self.assertTrue(date(2011, 12, 25) in hols) # Christmas Day
self.assertTrue(date(2011, 12, 26) in hols) # Christmas Day (in lieu)
self.assertTrue(date(2011, 12, 27) in hols) # Boxing Day
# 10 bank hols in 2012
hols = j.get_bank_holidays(2012)
self.assertEquals(len(hols), 10)
self.assertTrue(date(2012, 1, 1) in hols) # New Year's Day
self.assertTrue(date(2012, 1, 2) in hols) # New Year's Day (in lieu)
self.assertTrue(date(2012, 4, 6) in hols) # Good Friday
self.assertTrue(date(2012, 4, 9) in hols) # Easter Monday
self.assertTrue(date(2012, 5, 7) in hols) # Early May Bank Holiday
self.assertTrue(date(2012, 6, 4) in hols) # Spring Bank Holiday
self.assertTrue(date(2012, 6, 5) in hols) # Diamond Jubilee
self.assertTrue(date(2012, 8, 27) in hols) # Summer Bank Holiday
self.assertTrue(date(2012, 12, 25) in hols) # Christmas Day
self.assertTrue(date(2012, 12, 26) in hols) # Boxing Day
class LocationTestCase(unittest.TestCase):
def testLocationRequiredViewSubclass(self):
c = Client()
path = '/places/nearby/'
latitude = 51.752274
longitude = -1.255875
accuracy = 10
# Trying to get a LocationRequiredView with no location set should
# cause a redirect
response = c.get(path)
self.assertEquals(response.status_code, httplib.SEE_OTHER)
# Trying to get a LocationRequiredView with latitude and longitude
# query params returns OK
response = c.get(path, data={ 'latitude':latitude, 'longitude': longitude })
self.assertEquals(response.status_code, httplib.OK)
# Trying to get a LocationRequiredView with latitude, longitude
# and accuracy query params returns OK
response = c.get(path, data={ 'latitude':latitude, 'longitude': longitude, 'accuracy': accuracy })
self.assertEquals(response.status_code, httplib.OK)
# Trying to get a LocationRequiredView with an X-Current-Location (no accuracy)
# HTTP header returns OK
response = c.get(path, HTTP_X_CURRENT_LOCATION="latitude=%.6f,longitude=%.6f" % (latitude, longitude))
self.assertEquals(response.status_code, httplib.OK)
# Trying to get a LocationRequiredView with an X-Current-Location (including accuracy)
# HTTP header returns OK
response = c.get(path, HTTP_X_CURRENT_LOCATION="latitude=%.6f,longitude=%.6f,accuracy=%d" % (latitude, longitude, accuracy))
self.assertEquals(response.status_code, httplib.OK)
class CifTestCase(unittest.TestCase):
sample_file = \
"""
HDTPS.UCFCATE.PD1201131301122139DFTTISX FA130112300912
TIAACHEN 00081601LAACHEN 00005 0
TIABCWM 00385964VABERCWMBOI 78128 0
"""
class MockQuerySet():
def __init__(self, mockObj):
self._mock = mockObj
def count(self):
return 1
def __getitem__(self, index):
return self._mock
def setUp(self):
self.mock_entity_manager = mock()
self.provider = CifTimetableProvider(
entity_manager=self.mock_entity_manager
)
self.empty_query_set = mock()
self.entity_query_set = self.MockQuerySet(mock())
when(self.empty_query_set).count().thenReturn(0)
when(self.mock_entity_manager).get_entity(
'tiploc', 'ABCWM').thenReturn(self.empty_query_set)
when(self.mock_entity_manager).get_entity(
"tiploc", 'AACHEN').thenReturn(self.entity_query_set)
def testThatTiplocsAreLookedUp(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager, times=2).get_entity(any(), any())
def testThatTiplocsAreLookedUpWithCorrectNamespace(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager, times=2).get_entity("tiploc", any())
def testThatTiplocsAreLookedUpWithName(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager).get_entity("tiploc", "AACHEN")
def testThatTiplocsAreLookedUpWithStrippedName(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager).get_entity('tiploc', 'ABCWM')
def testThatTiplocsAreCreatedWhenNoneAreReturned(self):
self.provider.import_from_string(self.sample_file)
# Annoyingly mockito doesn't properly support assertions on the args
verify(self.mock_entity_manager).create(
source=any(),
primary_type=any(),
identifiers=any(),
titles=any()
)
def testThatTiplocsAreCreatedWithCorrectSource(self):
self.provider = CifTimetableProvider()
self.provider.import_from_string(self.sample_file)
entity = Entity.objects.get_entity('tiploc', 'ABCWM')
self.assertEquals(self.provider.source, entity[0].source)
def testThatTiplocsAreCreatedWithCorrectType(self):
self.provider = CifTimetableProvider()
self.provider.import_from_string(self.sample_file)
entity = Entity.objects.get_entity('tiploc', 'ABCWM')
self.assertEquals(self.provider.entity_type, entity[0].primary_type)
def testThatTiplocsAreCreatedWithCorrectName(self):
self.provider = CifTimetableProvider()
self.provider.import_from_string(self.sample_file)
entity = Entity.objects.get_entity('tiploc', 'ABCWM')
self.assertEquals('Abercwmboi', entity[0].title)
def testGetSource(self):
self.assertEquals(
'molly.apps.places.providers.cif',
self.provider.source.module_name
)
def testGetEntityTypeVerboseName(self):
self.assertEquals(
'rail network timing point',
self.provider.entity_type.verbose_name
)
def testGetEntityTypeVerboseNamePlural(self):
self.assertEquals(
'rail network timing points',
self.provider.entity_type.verbose_name_plural
)
def testGetEntityTypeVerboseNameSingular(self):
self.assertEquals(
'a rail network timing point',
self.provider.entity_type.verbose_name_singular
)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9791 | import unittest
import os
from shutil import rmtree
from abc import ABC
import numpy as np
import z5py
class DatasetTestMixin(ABC):
def setUp(self):
self.shape = (100, 100, 100)
self.path = 'array.' + self.data_format
self.root_file = z5py.File(self.path, use_zarr_format=self.data_format == 'zarr')
self.base_dtypes = [
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64'
]
self.dtypes = tuple(
self.base_dtypes +
[np.dtype(s) for s in self.base_dtypes] +
[
'<i1', '<i2', '<i4', '<i8',
'<u1', '<u2', '<u4', '<u8',
'<f4', '<f8'
] +
[
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float32, np.float64
]
)
def tearDown(self):
try:
rmtree(self.path)
except OSError:
pass
def check_array(self, result, expected, msg=None):
self.assertEqual(result.shape, expected.shape, msg)
self.assertTrue(np.allclose(result, expected), msg)
def test_ds_open_empty(self):
self.root_file.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
ds = self.root_file['test']
out = ds[:]
self.check_array(out, np.zeros(self.shape))
def test_ds_dtypes(self):
shape = (100, 100)
chunks = (10, 10)
for dtype in self.dtypes:
ds = self.root_file.create_dataset('data_%s' % hash(dtype),
dtype=dtype,
shape=shape,
chunks=chunks)
in_array = np.random.rand(*shape).astype(dtype)
ds[:] = in_array
out_array = ds[:]
self.check_array(out_array, in_array,
'datatype %s failed for format %s' % (self.data_format.title(),
dtype))
def check_ones(self, sliced_ones, expected_shape, msg=None):
self.check_array(sliced_ones, np.ones(expected_shape, dtype=np.uint8), msg)
def test_ds_simple_write(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, np.uint8)
def test_ds_indexing(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, np.uint8)
self.check_ones(ds[:], self.shape, 'full index failed')
self.check_ones(ds[1, ...], (100, 100), 'trailing ellipsis failed')
self.check_ones(ds[..., 1], (100, 100), 'leading ellipsis failed')
self.check_ones(ds[1], (100, 100), 'implicit ellipsis failed')
self.check_ones(ds[:, :, :, ...], self.shape, 'superfluous ellipsis failed')
self.check_ones(ds[500:501, :, :], (0, 100, 100), 'out-of-bounds slice failed')
self.check_ones(ds[-501:500, :, :], (0, 100, 100), 'negative out-of-bounds slice failed')
self.check_ones(ds[1, :, :], (100, 100), 'integer index failed')
self.check_ones(ds[-20:, :, :], (20, 100, 100), 'negative slice failed')
self.assertEqual(ds[1, 1, 1], 1, 'point index failed')
with self.assertRaises(ValueError):
ds[500, :, :]
with self.assertRaises(ValueError):
ds[-500, :, :]
with self.assertRaises(ValueError):
ds[..., :, ...]
with self.assertRaises(ValueError):
ds[1, 1, slice(0, 100, 2)]
with self.assertRaises(TypeError):
ds[[1, 1, 1]] # explicitly test behaviour different to h5py
class NotAnIndex(object):
pass
with self.assertRaises(TypeError):
ds[1, 1, NotAnIndex()]
def test_ds_scalar_broadcast(self):
for dtype in self.base_dtypes:
ds = self.root_file.create_dataset('ones_%s' % dtype,
dtype=dtype,
shape=self.shape,
chunks=(10, 10, 10))
ds[:] = 1
self.check_ones(ds[:], self.shape)
def test_ds_scalar_broadcast_from_float(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = float(1)
        self.check_ones(ds[:], self.shape)
def test_ds_scalar_broadcast_from_bool(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = True
self.check_ones(ds[:], self.shape)
def test_ds_set_with_arraylike(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[0, :2, :2] = [[1, 1], [1, 1]]
self.check_ones(ds[0, :2, :2], (2, 2))
def test_ds_set_from_float(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, dtype=float)
self.check_ones(ds[:], self.shape)
def test_ds_set_from_bool(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, dtype=bool)
self.check_ones(ds[:], self.shape)
def test_ds_fancy_broadcast_fails(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
with self.assertRaises(ValueError):
ds[0, :10, :10] = np.ones(10, dtype=np.uint8)
def test_ds_write_object_fails(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
class ArbitraryObject(object):
pass
with self.assertRaises(OSError):
ds[0, 0, :2] = [ArbitraryObject(), ArbitraryObject()]
def test_ds_write_flexible_fails(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
with self.assertRaises(TypeError):
ds[0, 0, 0] = "hey, you're not a number"
def test_readwrite_multithreaded(self):
for n_threads in (1, 2, 4, 8):
ds = self.root_file.create_dataset('data_mthread_%i' % n_threads,
dtype='float64',
shape=self.shape,
chunks=(10, 10, 10),
n_threads=n_threads)
in_array = np.random.rand(*self.shape)
ds[:] = in_array
out_array = ds[:]
self.check_array(out_array, in_array)
def test_create_nested_dataset(self):
self.root_file.create_dataset('group/sub_group/data',
shape=self.shape,
dtype='float64',
chunks=(10, 10, 10))
self.assertTrue(os.path.exists(os.path.join(self.path, 'group', 'sub_group', 'data')))
def test_create_with_data(self):
in_array = np.random.rand(*self.shape)
ds = self.root_file.create_dataset('data', data=in_array)
out_array = ds[:]
self.check_array(out_array, in_array)
def test_require_dataset(self):
in_array = np.random.rand(*self.shape)
self.root_file.require_dataset('data', data=in_array,
shape=in_array.shape,
dtype=in_array.dtype)
ds = self.root_file.require_dataset('data',
shape=in_array.shape,
dtype=in_array.dtype)
out_array = ds[:]
self.check_array(out_array, in_array)
def test_non_contiguous(self):
ds = self.root_file.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
# make a non-contiguous 3d array of the correct shape (100)^3
vol = np.arange(200**3).astype('float32').reshape((200, 200, 200))
in_array = vol[::2, ::2, ::2]
ds[:] = in_array
out_array = ds[:]
self.check_array(out_array, in_array, 'failed for non-contiguous data')
def test_empty_chunk(self):
ds = self.root_file.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
bb = np.s_[:10, :10, :10]
if ds.is_zarr:
chunk_path = os.path.join(self.path, 'test', '0.0.0')
else:
chunk_path = os.path.join(self.path, 'test', '0', '0', '0')
ds[bb] = 0
self.assertFalse(os.path.exists(chunk_path))
ds[bb] = 1
self.assertTrue(os.path.exists(chunk_path))
ds[bb] = 0
self.assertFalse(os.path.exists(chunk_path))
def test_invalid_options(self):
with self.assertRaises(RuntimeError):
self.root_file.create_dataset('test1', shape=self.shape, dtype='float32',
chunks=(10, 10, 10), compression='raw',
level=5)
with self.assertRaises(RuntimeError):
self.root_file.create_dataset('test2', shape=self.shape, dtype='float32',
chunks=(10, 10, 10), compression='gzip',
level=5, blub='blob')
def test_readwrite_chunk(self):
shape = (100, 100)
chunks = (10, 10)
for dtype in self.base_dtypes:
ds = self.root_file.create_dataset('test_%s' % dtype, dtype=dtype,
shape=shape, chunks=chunks,
compression='raw')
# test empty chunk
out = ds.read_chunk((0, 0))
self.assertEqual(out, None)
# test read/write
chunks_per_dim = ds.chunks_per_dimension
for x in range(chunks_per_dim[0]):
for y in range(chunks_per_dim[1]):
data = np.random.rand(*chunks)
if dtype not in ('float32', 'float64'):
data *= 128
data = data.astype(dtype)
ds.write_chunk((x, y), data)
out = ds.read_chunk((x, y))
self.assertEqual(data.shape, out.shape)
self.assertTrue(np.allclose(data, out))
def test_read_direct(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('test', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
# generate test data
data = np.random.rand(*shape)
ds[:] = data
# test reading full dataset
out = np.zeros(shape)
ds.read_direct(out)
self.assertTrue(np.allclose(out, data))
# test reading with selection
selection = np.s_[11:53, 67:84]
out = np.zeros(shape)
ds.read_direct(out, selection, selection)
self.assertTrue(np.allclose(out[selection], data[selection]))
def test_write_direct(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('test', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
# generate test data
data = np.random.rand(*shape)
# test writing full dataset
ds.write_direct(data)
out = ds[:]
self.assertTrue(np.allclose(out, data))
# test writing with selection
ds[:] = 0
selection = np.s_[11:53, 67:84]
ds.write_direct(data, selection, selection)
out = ds[:]
self.assertTrue(np.allclose(out[selection], data[selection]))
def test_irregular_chunks(self):
shape = (123, 54, 211)
chunks = (13, 33, 22)
ds = self.root_file.create_dataset('test', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
data = np.random.rand(*shape)
ds[:] = data
out = ds[:]
self.assertTrue(np.allclose(out, data))
def test_nd(self):
f = self.root_file
for ndim in range(1, 6):
size = 100 if ndim < 4 else 20
shape = (size,) * ndim
chunks = (10,) * ndim
ds = f.create_dataset('test_%i' % ndim, dtype='float64',
shape=shape, chunks=chunks, compression='raw')
data = np.random.rand(*shape)
ds[:] = data
out = ds[:]
self.assertTrue(np.allclose(out, data))
def test_no_implicit_squeeze(self):
arr = np.ones((5, 5, 5))
ds = self.root_file.create_dataset('ds', data=arr)
self.assertEqual(ds[:, 0:1, :].shape, arr[:, 0:1, :].shape)
def test_no_implicit_squeeze_singleton(self):
"""Issue #102
https://github.com/constantinpape/z5/issues/102
"""
arr = np.ones((5, 5, 5))
ds = self.root_file.create_dataset('ds', data=arr)
self.assertEqual(
ds[0:1, 0:1, 0:1].shape,
arr[0:1, 0:1, 0:1].shape,
)
def test_explicit_squeeze(self):
"""Issue #103
https://github.com/constantinpape/z5/issues/103
"""
arr = np.full((5, 4, 3), 1)
ds = self.root_file.create_dataset('ds543', data=arr)
self.assertEqual(ds[:, 1, :].shape, arr[:, 1, :].shape)
self.assertNotIsInstance(ds[1, 1, 1], np.ndarray)
def test_singleton_dtype(self):
"""Issue #102
https://github.com/constantinpape/z5/issues/102
"""
arr = np.ones((5, 5, 5))
ds = self.root_file.create_dataset('ds', data=arr)
self.assertEqual(type(ds[1, 1, 1]), type(arr[1, 1, 1]))
def test_broadcast_empty(self):
"""Issue #107
https://github.com/constantinpape/z5/issues/107
"""
ds = self.root_file.create_dataset('test', shape=(100, 100), chunks=(25, 25),
dtype='uint8', compression='raw')
ds[:20, :20] = 1
out = ds[:]
self.assertTrue(np.allclose(out[:20, :20], 1))
def test_empty_chunks_non_aligned_write(self):
"""Issue #106
https://github.com/constantinpape/z5/issues/106
"""
ds = self.root_file.create_dataset(name='test', shape=(128,), chunks=(32,),
compression='raw', dtype='uint8')
inp = np.ones((100,), dtype='uint8')
inp[90:100] = 0
ds[:100] = inp
# last chunk should be empty, but this is not the case if buffer was not
# cleared correctly
out = ds[-32:]
self.assertTrue(np.allclose(out, 0))
class TestZarrDataset(DatasetTestMixin, unittest.TestCase):
data_format = 'zarr'
def test_varlen(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('varlen', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
with self.assertRaises(RuntimeError):
ds.write_chunk((0, 0), np.random.rand(10), True)
class TestN5Dataset(DatasetTestMixin, unittest.TestCase):
data_format = 'n5'
def test_varlen(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('varlen', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
# max_len = 100
max_len = 10
chunks_per_dim = ds.chunks_per_dimension
for x in range(chunks_per_dim[0]):
for y in range(chunks_per_dim[1]):
test_data = np.random.rand(np.random.randint(1, max_len))
ds.write_chunk((x, y), test_data, True)
out = ds.read_chunk((x, y))
self.assertEqual(test_data.shape, out.shape)
self.assertTrue(np.allclose(test_data, out))
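# A compact, self-contained round trip through the z5py calls exercised by the tests
# above; the file name is an arbitrary placeholder.
def _example_roundtrip(path='example_roundtrip.n5'):
    f = z5py.File(path, use_zarr_format=False)
    ds = f.create_dataset('data', shape=(100, 100), chunks=(10, 10),
                          dtype='float64', compression='raw')
    data = np.random.rand(100, 100)
    ds[:] = data
    assert np.allclose(ds[:], data)
    rmtree(path)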
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9795 | from os import path, environ
from os.path import join, abspath, dirname
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst')) as f:
readme = f.read()
with open(join(here, 'requirements.txt')) as f:
required = f.read().splitlines()
with open(join(abspath(dirname(__file__)), "VERSION"), "r") as v:
VERSION = v.read().replace("\n", "")
with open(join(abspath(dirname(__file__)), "PATCH"), "r") as v:
PATCH = v.read().replace("\n", "")
setup(
name='patton-cli',
version=f"{VERSION}.{PATCH}",
packages=find_packages(),
long_description=readme,
install_requires=required,
url='https://github.com/bbva/patton-cli',
license='MIT',
author='BBVA Labs',
description='CLI for Patton-Server: The vulnerability knowledge store',
entry_points={'console_scripts': [
'patton = patton_client.cli:main',
]},
classifiers=[
'Environment :: Console',
'Intended Audience :: System Administrators',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
],
)
|
the-stack_0_9796 | # https://codeforces.com/problemset/problem/1358/A
def find_min_lamps(n: int, m: int) -> int:
    if n % 2 == 0:
        count = n // 2 * m
    elif m % 2 == 0:
        count = m // 2 * n
    else:
        count = n // 2 * m + m // 2 + 1
    return count
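# Sanity check for the case analysis above: the result is always the ceiling of
# n*m/2 (half the board's cells, rounded up). This helper is only an illustrative
# check, not part of the submitted solution.
def _check_formula(limit: int = 6) -> None:
    for n in range(1, limit):
        for m in range(1, limit):
            assert find_min_lamps(n, m) == (n * m + 1) // 2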
def main():
t = int(input())
cases = [list(map(int, input().split())) for _ in range(t)]
for case in cases:
print(find_min_lamps(case[0], case[1]))
if __name__=='__main__':
main() |
the-stack_0_9797 | import argparse
import os
import sys
import time
import re
from tqdm import tqdm
from datetime import datetime
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch.onnx
import utils
from transformer_net import TransformerNet
from vgg import Vgg16
def check_paths(args):
try:
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
if args.checkpoint_model_dir is not None and not (os.path.exists(args.checkpoint_model_dir)):
os.makedirs(args.checkpoint_model_dir)
except OSError as e:
print(e)
sys.exit(1)
def train(args):
device = torch.device("cuda" if args.cuda else "cpu")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
transform = transforms.Compose([
transforms.Resize(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
transformer = TransformerNet().to(device)
optimizer = Adam(transformer.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
vgg = Vgg16(requires_grad=False).to(device)
style_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
style = utils.load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1).to(device)
features_style = vgg(utils.normalize_batch(style))
gram_style = [utils.gram_matrix(y) for y in features_style]
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.
agg_style_loss = 0.
count = 0
print('%s-training at epoch %d of %d...' % (datetime.now().strftime('%H:%M:%S.%f'), e, args.epochs))
with tqdm(total=len(train_loader)) as pbar:
for batch_id, (x, _) in enumerate(train_loader):
pbar.set_description('%s-training at batch %d...' % (datetime.now().strftime('%H:%M:%S.%f'), batch_id))
n_batch = len(x)
count += n_batch
optimizer.zero_grad()
x = x.to(device)
y = transformer(x)
y = utils.normalize_batch(y)
x = utils.normalize_batch(x)
features_y = vgg(y)
features_x = vgg(x)
content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2)
style_loss = 0.
for ft_y, gm_s in zip(features_y, gram_style):
gm_y = utils.gram_matrix(ft_y)
style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
style_loss *= args.style_weight
total_loss = content_loss + style_loss
total_loss.backward()
optimizer.step()
agg_content_loss += content_loss.item()
agg_style_loss += style_loss.item()
if (batch_id + 1) % args.log_interval == 0:
mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
time.ctime(), e + 1, count, len(train_dataset),
agg_content_loss / (batch_id + 1),
agg_style_loss / (batch_id + 1),
(agg_content_loss + agg_style_loss) / (batch_id + 1)
)
print(mesg)
if args.checkpoint_model_dir is not None and (batch_id + 1) % args.checkpoint_interval == 0:
transformer.eval().cpu()
ckpt_model_filename = "ckpt_epoch_" + str(e) + "_batch_id_" + str(batch_id + 1) + ".pth"
ckpt_model_path = os.path.join(args.checkpoint_model_dir, ckpt_model_filename)
torch.save(transformer.state_dict(), ckpt_model_path)
transformer.to(device).train()
pbar.update(1)
# save model
transformer.eval().cpu()
save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
args.content_weight) + "_" + str(args.style_weight) + ".model"
save_model_path = os.path.join(args.save_model_dir, save_model_filename)
torch.save(transformer.state_dict(), save_model_path)
print("\nDone, trained model saved at", save_model_path)
def stylize(args):
device = torch.device("cuda" if args.cuda else "cpu")
image_extensions = ['.jpg', '.jpeg', '.png']
if os.path.isfile(args.content_image):
content_files = [args.content_image]
else:
content_files = [os.path.join(args.content_image, f) for f in os.listdir(args.content_image) if
os.path.splitext(f)[-1].lower() in image_extensions]
if os.path.isfile(args.model):
model_files = [args.model]
else:
model_files = [os.path.join(args.model, f) for f in os.listdir(args.model) if
f.endswith('.pth') or f.endswith('.model') or f.endswith('.onnx')]
with tqdm(total=len(content_files) * len(model_files)) as pbar:
for content_file in content_files:
content_image = utils.load_image(content_file, scale=args.content_scale)
content_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device)
for model_file in model_files:
if len(content_files) == 1 and len(model_files) == 1 and not os.path.isdir(args.output_image):
output_file = args.output_image
else:
content = os.path.splitext(os.path.basename(content_file))[0]
style = os.path.splitext(os.path.basename(model_file))[0]
output_file = os.path.join(args.output_image, content + '+' + style + '.png')
pbar.set_description('%s-generating %s...' % (datetime.now().strftime('%H:%M:%S.%f'), output_file))
if args.model.endswith(".onnx"):
args.model = model_file
output = stylize_onnx(content_image, args)
else:
with torch.no_grad():
style_model = TransformerNet()
state_dict = torch.load(model_file)
# remove saved deprecated running_* keys in InstanceNorm from the checkpoint
for k in list(state_dict.keys()):
if re.search(r'in\d+\.running_(mean|var)$', k):
del state_dict[k]
style_model.load_state_dict(state_dict)
style_model.to(device)
style_model.eval()
if args.export_onnx:
assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
output = torch.onnx._export(
style_model, content_image, args.export_onnx, opset_version=11,
).cpu()
else:
output = style_model(content_image).cpu()
utils.save_image(output_file, output[0])
pbar.update(1)
def stylize_onnx(content_image, args):
"""
Read ONNX model and run it using onnxruntime
"""
assert not args.export_onnx
import onnxruntime
ort_session = onnxruntime.InferenceSession(args.model)
def to_numpy(tensor):
return (
tensor.detach().cpu().numpy()
if tensor.requires_grad
else tensor.cpu().numpy()
)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(content_image)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
return torch.from_numpy(img_out_y)
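# Typical command lines for the argparse interface defined in `main` below; the
# script name and all paths are placeholders, not taken from this repository.
#
#   python neural_style.py train --dataset /path/to/train-images \
#       --style-image images/style-images/mosaic.jpg \
#       --save-model-dir /path/to/models --cuda 1
#   python neural_style.py eval --content-image /path/to/content.jpg \
#       --model /path/to/model.pth --output-image /path/to/out.png --cuda 0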
def main():
main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand")
train_arg_parser = subparsers.add_parser("train", help="parser for training arguments")
train_arg_parser.add_argument("--epochs", type=int, default=2,
help="number of training epochs, default is 2")
train_arg_parser.add_argument("--batch-size", type=int, default=4,
help="batch size for training, default is 4")
train_arg_parser.add_argument("--dataset", type=str, required=True,
help="path to training dataset, the path should point to a folder "
"containing another folder with all the training images")
train_arg_parser.add_argument("--style-image", type=str, default="images/style-images/mosaic.jpg",
help="path to style-image")
train_arg_parser.add_argument("--save-model-dir", type=str, required=True,
help="path to folder where trained model will be saved.")
train_arg_parser.add_argument("--checkpoint-model-dir", type=str, default=None,
help="path to folder where checkpoints of trained models will be saved")
train_arg_parser.add_argument("--image-size", type=int, default=256,
help="size of training images, default is 256 X 256")
train_arg_parser.add_argument("--style-size", type=int, default=None,
help="size of style-image, default is the original size of style image")
train_arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
train_arg_parser.add_argument("--seed", type=int, default=42,
help="random seed for training")
train_arg_parser.add_argument("--content-weight", type=float, default=1e5,
help="weight for content-loss, default is 1e5")
train_arg_parser.add_argument("--style-weight", type=float, default=1e10,
help="weight for style-loss, default is 1e10")
train_arg_parser.add_argument("--lr", type=float, default=1e-3,
help="learning rate, default is 1e-3")
train_arg_parser.add_argument("--log-interval", type=int, default=500,
help="number of images after which the training loss is logged, default is 500")
train_arg_parser.add_argument("--checkpoint-interval", type=int, default=2000,
help="number of batches after which a checkpoint of the trained model will be created")
eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
eval_arg_parser.add_argument("--content-image", type=str, required=True,
help="path to content image you want to stylize")
eval_arg_parser.add_argument("--content-scale", type=float, default=None,
help="factor for scaling down the content image")
eval_arg_parser.add_argument("--output-image", type=str, required=True,
help="path for saving the output image")
eval_arg_parser.add_argument("--model", type=str, required=True,
help="saved model to be used for stylizing the image. If file ends in .pth - PyTorch path is used, if in .onnx - Caffe2 path")
eval_arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
eval_arg_parser.add_argument("--export_onnx", type=str,
help="export ONNX model to a given file")
args = main_arg_parser.parse_args()
if args.subcommand is None:
print("ERROR: specify either train or eval")
sys.exit(1)
if args.cuda and not torch.cuda.is_available():
print("ERROR: cuda is not available, try running on CPU")
sys.exit(1)
if args.subcommand == "train":
check_paths(args)
train(args)
else:
stylize(args)
if __name__ == "__main__":
main()
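# Illustrative CLI usage (hedged sketch -- the script name and all paths below are
# placeholders, not part of the original file; the flags come from the argparse setup above):
#
#   python neural_style.py train --dataset path/to/train-images \
#       --style-image images/style-images/mosaic.jpg --save-model-dir saved_models --cuda 1
#
#   python neural_style.py eval --content-image path/to/content.jpg \
#       --model saved_models/mosaic.pth --output-image out.jpg --cuda 0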
|
the-stack_0_9799 | import hashlib
import hmac
import os
from collections import namedtuple
from functools import lru_cache
from typing import List, Tuple
from urllib.parse import urlparse, urlunparse, quote
from botocore.client import ClientEndpointBridge
from botocore.loaders import create_loader
from botocore.model import ServiceModel
from botocore.regions import EndpointResolver
from botocore.session import Session
from notebook.base.handlers import APIHandler
from tornado.httpclient import (
AsyncHTTPClient,
HTTPRequest,
HTTPResponse,
HTTPClientError,
HTTPError,
)
from tornado.httputil import HTTPServerRequest, HTTPHeaders
ServiceInfo = namedtuple(
"ServiceInfo", ["service_name", "host", "endpoint_url", "credential_scope"]
)
UpstreamAuthInfo = namedtuple(
"UpstreamAuthInfo", ["service_name", "region", "signed_headers"]
)
# maxsize is arbitrarily taken from https://docs.python.org/3/library/functools.html#functools.lru_cache
@lru_cache(maxsize=128)
def get_service_info(
endpoint_resolver: EndpointResolver, service_name: str, region: str
) -> ServiceInfo:
service_model_json = create_loader().load_service_model(service_name, "service-2")
service_data = ClientEndpointBridge(endpoint_resolver).resolve(
service_name=ServiceModel(
service_model_json, service_name=service_name
).endpoint_prefix,
region_name=region,
)
return ServiceInfo(
service_name,
service_data["metadata"]["hostname"],
service_data["endpoint_url"],
service_data["metadata"].get("credentialScope"),
)
def create_endpoint_resolver() -> EndpointResolver:
"""
Creates an instance of the botocore EndpointResolver. Used to inject the instance during application initialization
to avoid loading endpoint data on a per-request basis.
:return: the EndpointResolver instance
"""
return EndpointResolver(create_loader().load_data("endpoints"))
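# Illustrative lookup (hedged sketch): the resolver is created once at startup and reused,
# so repeated get_service_info calls hit the lru_cache above. The service name, region and
# hostname below are examples only and depend on the installed botocore endpoint data.
#
#   resolver = create_endpoint_resolver()
#   info = get_service_info(resolver, "sagemaker", "us-west-2")
#   # info.endpoint_url -> something like "https://api.sagemaker.us-west-2.amazonaws.com"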
class AwsProxyHandler(APIHandler):
def initialize(self, endpoint_resolver: EndpointResolver, session: Session):
"""
Hook for Tornado handler initialization.
:param session: the botocore session
:param endpoint_resolver: the application level EndpointResolver instance
"""
self.endpoint_resolver = endpoint_resolver
self.session = session
async def handle_request(self):
try:
response = await AwsProxyRequest(
self.request, self.endpoint_resolver, self.session
).execute_downstream()
self.set_status(response.code, response.reason)
self._finish_response(response)
except HTTPClientError as e:
self.set_status(e.code, e.message)
if e.response:
self._finish_response(e.response)
else:
super(APIHandler, self).finish()
def _finish_response(self, response: HTTPResponse):
for name, value in response.headers.get_all():
if self._is_blacklisted_response_header(name, value):
continue
self.set_header(name, value)
super(APIHandler, self).finish(response.body or None)
async def post(self, *args):
await self.handle_request()
async def get(self, *args):
await self.handle_request()
async def delete(self, *args):
await self.handle_request()
async def patch(self, *args):
await self.handle_request()
async def put(self, *args):
await self.handle_request()
async def head(self, *args):
await self.handle_request()
@staticmethod
def _is_blacklisted_response_header(name: str, value: str) -> bool:
if name == "Transfer-Encoding" and value == "chunked":
# Responses are no longer "chunked" when we send them to the browser.
# If we retain this header, then the browser will wait forever for more chunks.
return True
elif name == "Content-Length":
# Tornado will auto-set the Content-Length
return True
else:
return False
class AwsProxyRequest(object):
"""
A class representing a request being proxied from an upstream client (browser) to the downstream AWS service.
"""
BLACKLISTED_REQUEST_HEADERS: List[str] = ["Origin", "Host"]
def __init__(
self,
upstream_request: HTTPServerRequest,
endpoint_resolver: EndpointResolver,
session: Session,
):
"""
:param upstream_request: The original upstream HTTP request from the client(browser) to Jupyter
:param endpoint_resolver: The botocore endpoint_resolver instance
"""
self.upstream_request = upstream_request
self.endpoint_resolver = endpoint_resolver
self.credentials = session.get_credentials()
self.upstream_auth_info = self._build_upstream_auth_info()
self.service_info = get_service_info(
endpoint_resolver,
self.upstream_auth_info.service_name,
self.upstream_auth_info.region,
)
# if the environment variable is not specified, os.getenv returns None, and no whitelist is in effect.
self.whitelisted_services = (
os.getenv("AWS_JUPYTER_PROXY_WHITELISTED_SERVICES").strip(",").split(",")
if os.getenv("AWS_JUPYTER_PROXY_WHITELISTED_SERVICES") is not None
else None
)
async def execute_downstream(self) -> HTTPResponse:
"""
Executes the downstream request (Jupyter to AWS service) and return the response or the error
after adding SigV4 authentication.
"allow_nonstandard_methods" is used because Tornado rejects POST requests without a body without this parameter,
and some operations send such requests (such as S3.InitiateMultipartUpload)
:return: the HTTPResponse
"""
if (
self.whitelisted_services is not None
and self.service_info.service_name not in self.whitelisted_services
):
raise HTTPError(
403,
message=f"Service {self.service_info.service_name} is not whitelisted for proxying requests",
)
base_service_url = urlparse(self.service_info.endpoint_url)
start_index = self.upstream_request.path.index("/awsproxy") + len("/awsproxy")
downstream_request_path = (
base_service_url.path + self.upstream_request.path[start_index:] or "/"
)
return await AsyncHTTPClient().fetch(
HTTPRequest(
method=self.upstream_request.method,
url=self._compute_downstream_url(downstream_request_path),
headers=self._compute_downstream_headers(downstream_request_path),
body=self.upstream_request.body or None,
follow_redirects=False,
allow_nonstandard_methods=True,
)
)
def _compute_downstream_url(self, downstream_request_path) -> str:
base_service_url = urlparse(self.service_info.endpoint_url)
return urlunparse(
[
base_service_url.scheme,
base_service_url.netloc,
downstream_request_path,
base_service_url.params,
self.upstream_request.query,
None,
]
)
def _compute_downstream_headers(self, downstream_request_path) -> HTTPHeaders:
"""
1. Copy original headers apart from blacklisted ones
2. Add the Host header based on the service model
3. Add a security token header if the current session is using temporary credentials
4. Add the SigV4 Authorization header.
:param downstream_request_path: the URL path for the downstream service request
:return: the headers to pass to the downstream request
"""
downstream_request_headers = self.upstream_request.headers.copy()
for blacklisted_request_header in self.BLACKLISTED_REQUEST_HEADERS:
try:
del downstream_request_headers[blacklisted_request_header]
except KeyError:
pass
base_service_url = urlparse(self.service_info.endpoint_url)
downstream_request_headers["Host"] = base_service_url.netloc
if self.credentials.token:
downstream_request_headers["X-Amz-Security-Token"] = self.credentials.token
downstream_request_headers["Authorization"] = self._sigv4_auth_header(
downstream_request_path
)
return downstream_request_headers
def _sigv4_auth_header(self, downstream_request_path) -> str:
"""
Computes the SigV4 signature following https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
:param downstream_request_path: the URL path for the downstream service's request
        :return: the Authorization header containing SigV4 credentials
"""
# ************* TASK 1: CREATE THE CANONICAL REQUEST*************
canonical_method = self.upstream_request.method
canonical_uri = quote(downstream_request_path)
canonical_querystring = self._get_canonical_querystring()
signed_headers, canonical_headers = self._get_signed_canonical_headers()
payload_hash = hashlib.sha256(self.upstream_request.body).hexdigest()
canonical_request = (
f"{canonical_method}\n"
f"{canonical_uri}\n"
f"{canonical_querystring}\n"
f"{canonical_headers}\n"
f"{signed_headers}\n"
f"{payload_hash}"
)
# ************* TASK 2: CREATE THE STRING TO SIGN*************
algorithm = "AWS4-HMAC-SHA256"
region = self._get_downstream_signing_region()
amz_date = self.upstream_request.headers["X-Amz-Date"]
date_stamp = amz_date[0:8]
credential_scope = (
f"{date_stamp}/{region}/{self.service_info.service_name}/aws4_request"
)
request_digest = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
string_to_sign = (
f"{algorithm}\n" f"{amz_date}\n" f"{credential_scope}\n" f"{request_digest}"
)
# ************* TASK 3: CALCULATE THE SIGNATURE *************
signing_key = get_signature_key(
self.credentials.secret_key,
date_stamp,
region,
self.service_info.service_name,
)
signature = hmac.new(
signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
).hexdigest()
# ************* TASK 4: BUILD THE AUTH HEADER *************
authorization_header = (
f"{algorithm} "
f"Credential={self.credentials.access_key}/{credential_scope}, "
f"SignedHeaders={signed_headers}, "
f"Signature={signature}"
)
return authorization_header
def _get_canonical_querystring(self) -> str:
canonical_query_string = ""
corrected_request_query = self.upstream_request.query.replace("+", "%20")
if corrected_request_query != "":
query_string_list = []
for item in corrected_request_query.split("&"):
query_string_part = item.split("=", maxsplit=1)
if len(query_string_part) == 2:
query_string_list.append(query_string_part)
elif len(query_string_part) == 1:
query_string_part.append("")
query_string_list.append(query_string_part)
else:
raise ValueError(f"Invalid query string split for {item}")
query_string_dict = dict(query_string_list)
sorted_q_string_list = [
f"{k}={query_string_dict[k]}" for k in sorted(query_string_dict)
]
canonical_query_string = "&".join(sorted_q_string_list)
return canonical_query_string
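    # Worked example for the method above (illustrative values only): an upstream query of
    # "prefix=abc&list-type=2&delimiter=%2F" is split into key/value pairs, the keys are
    # sorted, and the canonical form becomes "delimiter=%2F&list-type=2&prefix=abc";
    # a bare key such as "acl" is kept as "acl=" with an empty value.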
def _get_signed_canonical_headers(self) -> Tuple[str, str]:
canonical_headers = {}
for signed_header in self.upstream_auth_info.signed_headers:
canonical_headers[signed_header] = self.upstream_request.headers[
signed_header
]
base_service_url = urlparse(self.service_info.endpoint_url)
canonical_headers["host"] = base_service_url.netloc
if self.credentials.token:
canonical_headers["x-amz-security-token"] = self.credentials.token
canonical_headers_string = "\n".join(
[
f"{canonical_header}:{canonical_headers[canonical_header]}"
for canonical_header in sorted(canonical_headers)
]
)
canonical_headers_string += "\n"
signed_headers = ";".join(sorted(canonical_headers))
return signed_headers, canonical_headers_string
def _get_downstream_signing_region(self) -> str:
"""
Get the region to sign the downstream request for. The default is the region that the request was originally
signed, but if the service has a credentialScope override specified in the service config then that is used.
:return: the region to sign the request with.
"""
if not self.service_info.credential_scope:
return self.upstream_auth_info.region
try:
return self.service_info.credential_scope["region"]
except KeyError:
return self.upstream_auth_info.region
def _build_upstream_auth_info(self) -> UpstreamAuthInfo:
"""
        Parses the upstream request's Authorization header to determine identifying information such as the region and
the service the request was originally signed for.
Sample header:
AWS4-HMAC-SHA256 \
Credential=SOMEACCESSKEY/20190814/aws_region/aws_service/aws4_request, \
SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-target;x-amz-user-agent, \
Signature=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
:return: the UpstreamAuthInfo instance
"""
auth_header_parts = self.upstream_request.headers["Authorization"].split(" ")
signed_headers = auth_header_parts[2].strip(",").split("=")[1].split(";")
_, _, region, service_name, _ = auth_header_parts[1].split("=")[1].split("/")
return UpstreamAuthInfo(service_name, region, signed_headers)
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def get_signature_key(key, date_stamp, region_name, service_name):
k_date = sign(("AWS4" + key).encode("utf-8"), date_stamp)
k_region = sign(k_date, region_name)
k_service = sign(k_region, service_name)
k_signing = sign(k_service, "aws4_request")
return k_signing
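# Hedged sketch of the derivation chain above. The secret key below is the placeholder
# example key from the AWS documentation, not a real credential:
#
#   signing_key = get_signature_key("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
#                                   "20190814", "us-east-1", "s3")
#
# which is equivalent to
#
#   sign(sign(sign(sign(("AWS4" + secret).encode("utf-8"), "20190814"),
#                  "us-east-1"), "s3"), "aws4_request")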
|
the-stack_0_9800 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# scvi documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
import scvi
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "2.0" # Nicer param docs
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"nbsphinx",
"nbsphinx_link",
"sphinx_autodoc_typehints",
"sphinx.ext.mathjax",
"sphinx_rtd_theme",
"sphinx.ext.intersphinx",
"autodocsumm",
]
# nbsphinx specific settings
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
autodoc_default_options = {"autosummary": True}
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"scVI"
copyright = u"2020, Romain Lopez, Adam Gayoso, Pierre Boyeau"
author = u"Romain Lopez"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = scvi.__version__
# The full version, including alpha/beta/rc tags.
release = scvi.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_show_sphinx = False
def setup(app):
app.add_stylesheet("css/custom.css")
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "scvidoc"
mathjax_config = {
"extensions": ["tex2jax.js"],
"jax": ["input/TeX", "output/HTML-CSS"],
"tex2jax": {
"inlineMath": [["$", "$"], ["\\(", "\\)"]],
"displayMath": [["$$", "$$"], ["\\[", "\\]"]],
"processEscapes": True,
},
}
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "scvi.tex", u"scVI Documentation", u"Romain Lopez", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "scvi", u"scVI Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"scvi",
u"scVI Documentation",
author,
"scvi",
"One line description of project.",
"Miscellaneous",
)
]
|
the-stack_0_9804 | import cv2,os,PIL
import numpy as np
from keras.applications.vgg16 import decode_predictions
from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19
from keras.preprocessing import image as Image
from keras.applications.vgg16 import preprocess_input
from tqdm import tqdm
from skimage import feature
from keras.models import Model
import math
import pandas as pd
from scipy.cluster.vq import kmeans
from scipy.cluster.vq import whiten
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="nri_uniform")
#print(list(lbp.ravel()))
#print(set(list(lbp.ravel())))
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints*(self.numPoints-1) + 3),
range=(0, self.numPoints*(self.numPoints-1) + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
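# Illustrative use of LocalBinaryPatterns (hedged sketch; "example.jpg" is a placeholder path):
#
#   gray = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2GRAY)
#   desc = LocalBinaryPatterns(8, 1.0)
#   hist = desc.describe(gray)   # normalised histogram of non-rotation-invariant uniform patterns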
def image_feature_extraction(x, model):
data = preprocess_input(x)
layer_model = Model(inputs=model.input, outputs=model.layers[-2].output)
features = layer_model.predict(data)
    # Note: this helper is not called in __main__ below; avoid printing the module-level
    # loop variable `im` here so the function stays self-contained.
    print(features.shape)
# print(features)
return features
def image_tags_extraction(x, model):
data = preprocess_input(x)
yhat = model.predict(data)
labels = decode_predictions(yhat, top=10)[0]
print(labels)
return labels
def image_colorfulness(image):
# split the image into its respective RGB components
(B, G, R) = cv2.split(image.astype("float"))
# compute rg = R - G
rg = np.absolute(R - G)
# compute yb = 0.5 * (R + G) - B
yb = np.absolute(0.5 * (R + G) - B)
# compute the mean and standard deviation of both `rg` and `yb`
(rbMean, rbStd) = (np.mean(rg), np.std(rg))
(ybMean, ybStd) = (np.mean(yb), np.std(yb))
# combine the mean and standard deviations
stdRoot = np.sqrt((rbStd ** 2) + (ybStd ** 2))
meanRoot = np.sqrt((rbMean ** 2) + (ybMean ** 2))
# derive the "colorfulness" metric and return it
return stdRoot + (0.3 * meanRoot)
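# Quick sanity check for the metric above (illustrative only): a uniform grey image has
# rg = yb = 0 everywhere and therefore scores ~0, while a random-colour image scores a
# much larger value.
#
#   grey = np.full((64, 64, 3), 128, dtype=np.uint8)
#   image_colorfulness(grey)                                                    # ~0.0
#   image_colorfulness(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))  # much larger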
def im_tags_embedding(labels, embedding_vector):
#print(labels[0])
words = []
for label in labels:
word = label[1]
#print(word)
words.append(word)
tags_matrix = []
zero_array = np.zeros(300)
for tag in words:
if tag in embedding_vector.keys():
tag_embedding = embedding_vector[tag]
tags_matrix.append(np.array(tag_embedding))
zero_array = zero_array+np.array(tag_embedding)
tag_feature = zero_array / len(tags_matrix)
return list(tag_feature)
def im_color_hist(im):
chans = cv2.split(im)
colors = ("b", "g", "r")
features = []
for (chan, color) in zip(chans, colors):
average_value = chan.mean()/256
features.append(average_value.item())
'''
hist = cv2.calcHist([chan],[0], None, [256], [0,255])
np.array(hist).flatten()
hist.resize(hist.size)
hist = list(hist/np.sum(hist))
features.extend(hist)
'''
return features
def dominant_color_rgb(image):
r = []
g = []
b = []
for line in image:
for pixel in line:
temp_r, temp_g, temp_b = pixel
r.append(temp_r)
g.append(temp_g)
b.append(temp_b)
df = pd.DataFrame({'red':r,'blue':b,'green':g})
df['scaled_red'] = whiten(df['red'])
df['scaled_blue'] = whiten(df['blue'])
df['scaled_green'] = whiten(df['green'])
cluster_center, distortion = kmeans(df[['scaled_red','scaled_green','scaled_blue']],1)
#print(cluster_center)
return cluster_center
def embedding_load(embedding_path):
embedding_vector = {}
f = open(embedding_path,'r',encoding='utf8')
for line in tqdm(f):
value = line.split(' ')
word = value[0]
coef = np.array(value[1:], dtype='float32')
embedding_vector[word] = coef
f.close()
return embedding_vector
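# The embedding file is assumed to be GloVe/word2vec-style plain text: one token per line
# followed by its space-separated vector components (illustrative line shown below).
#
#   dog 0.11008 -0.38781 ... (300 floats in total for embedding_300d.txt)
#
#   embedding_vector = embedding_load('embedding_300d.txt')
#   embedding_vector['dog'].shape   # -> (300,)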
if __name__ == '__main__':
model_names = ['VGG16','VGG19','ResNet50','InceptionV3','Xception']
dataset_name = 'dataset' #'silver_negative' 'silver_positive'
if dataset_name == 'dataset':
im_path = 'data/dataset_images/dataset_image/'
elif dataset_name == 'silver_negative':
im_path = 'data/silver_negative/'
elif dataset_name == 'silver_positive':
im_path = 'data/silver_positive/'
embedding_vector = embedding_load('embedding_300d.txt')
lbp_feature_dict = {}
other_feature_dict = {}
tags_embedding_feature_dict = {}
#last_layer_feature_dict = {}
#color_hist_feature_dict = {}
for model_name in model_names:
out_tag_file = open(dataset_name+'_'+ model_name + '_image_tags.txt', 'w', encoding='utf8')
deep_learning_feature_file_name = 'feature_data/' + dataset_name + '_'+ model_name +'_image_tag_feature.npy'
if model_name == 'VGG16':
model = VGG16(weights='imagenet', include_top=True)
im_size = 224
elif model_name == 'VGG19':
model = VGG19(weights='imagenet', include_top=True)
im_size = 224
elif model_name == 'ResNet50':
model = ResNet50(weights='imagenet', include_top=True)
im_size = 224
elif model_name == 'InceptionV3':
model = InceptionV3(weights='imagenet', include_top=True)
im_size = 299
elif model_name == 'Xception':
model = Xception(weights='imagenet', include_top=True)
im_size = 299
#print(model.summary())
for im in os.listdir(im_path):
print(im)
try:
img = Image.load_img(im_path + im, target_size=(im_size, im_size))
except:
print(im_path + im)
continue
x = Image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# im_last_layer_feature = image_feature_extraction(x, model)
# print('im_last_layer_feature length ', len(im_last_layer_feature))
image_tags = image_tags_extraction(x, model)
tags = ''
for tag in image_tags:
# print(tag[1])
tags = tags + tag[1] + ' '
print(im + '\t' + tags + '\n')
out_tag_file.write(im + '\t' + tags + '\n')
tags_embedding_feature = im_tags_embedding(image_tags, embedding_vector)
tags_embedding_feature_dict[im] = tags_embedding_feature
np.save(deep_learning_feature_file_name, tags_embedding_feature_dict)
out_tag_file.close()
for im in os.listdir(im_path):
print(im)
im_size = os.path.getsize(im_path+im)
print('im_size:', im_size)
image = cv2.imread(im_path+im)
try:
dominant_color = dominant_color_rgb(image)
print('dominant_color:', dominant_color[0])
except:
dominant_color = np.array([[0,0,0]])
colorfulness = image_colorfulness(image)
print('colorfulness:', colorfulness)
sp = image.shape
high = sp[0]
width = sp[1]
print('sp',sp[2])
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # RMS-style contrast of the first (blue) channel around the reference value b.
        b = 45
        arr = np.array(image, dtype=np.int64)  # widen the dtype so squared differences cannot wrap around
        total = 0
        for h in range(arr.shape[0]):
            for w in range(arr.shape[1]):
                total += (arr[(h, w, 0)] - b) * (arr[(h, w, 0)] - b)
        # Divide by the number of pixels; the original total/high*width divided by high only.
        # total is a sum of squares, so it is never negative and the sqrt is always defined.
        contrast = math.sqrt(total / (high * width))
        print('contrast:', contrast)
desc = LocalBinaryPatterns(8, 1.0) # 59
hist_LBP = desc.describe(gray) #
print(hist_LBP)
#color_hist = im_color_hist(image) # 768
#color_hist.append(h)
#color_hist.append(w)
lbp_feature_dict[im] = list(hist_LBP)
other_feature_dict[im] = [im_size/1000, high, width, colorfulness, contrast/1000]+list(dominant_color[0])
#print([im_size/1000, high, width, colorfulness, contrast/1000]+list(dominant_color[0]))
#color_hist_feature_dict[im] = color_hist
#last_layer_feature_dict[im] = im_last_layer_feature
np.save('feature_data/' + dataset_name+'_image_LBP_feature.npy', lbp_feature_dict)
np.save('feature_data/' + dataset_name+'_image_other_feature.npy', other_feature_dict)
#np.save(dataset_name+'_image_color_feature.npy', color_hist_feature_dict)
|
the-stack_0_9805 | __all__ = ["ICOS"]
class ICOS:
""" Interface for processing ICOS data
"""
def __init__(self):
# Sampling period of ICOS data in seconds
self._sampling_period = "NA"
def read_file(self, data_filepath, site=None, network=None):
""" Reads ICOS data files and returns the UUIDS of the Datasources
the processed data has been assigned to
Args:
data_filepath (str or Path): Path of file to load
Returns:
list: UUIDs of Datasources data has been assigned to
"""
from pathlib import Path
from HUGS.Processing import assign_attributes
data_filepath = Path(data_filepath)
source_name = data_filepath.stem
if site is None:
site = source_name.split(".")[0]
species = source_name.split(".")[1]
# This should return xarray Datasets
gas_data = self.read_data(data_filepath=data_filepath, species=species, site=site)
# Assign attributes to the xarray Datasets here data here makes it a lot easier to test
gas_data = assign_attributes(data=gas_data, site=site, sampling_period=self._sampling_period)
return gas_data
def read_data(self, data_filepath, species, site=None):
""" Separates the gases stored in the dataframe in
separate dataframes and returns a dictionary of gases
with an assigned UUID as gas:UUID and a list of the processed
dataframes
TODO - update this to process multiple species here?
Args:
data_filepath (pathlib.Path): Path of datafile
species (str): Species to process
Returns:
dict: Dictionary containing attributes, data and metadata keys
"""
from pandas import read_csv, Timestamp
import numpy as np
from HUGS.Util import read_header
# metadata = read_metadata(filepath=data_filepath, data=data, data_type="ICOS")
header = read_header(filepath=data_filepath)
n_skip = len(header) - 1
species = "co2"
def date_parser(year, month, day, hour, minute):
return Timestamp(year, month, day, hour, minute)
datetime_columns = {"time": ["Year", "Month", "Day", "Hour", "Minute"]}
use_cols = [
"Year",
"Month",
"Day",
"Hour",
"Minute",
str(species.lower()),
"Stdev",
"NbPoints",
]
        # np.int / np.float were removed in NumPy >= 1.24, so use the explicit 64-bit dtypes.
        dtypes = {
            "Day": np.int64,
            "Month": np.int64,
            "Year": np.int64,
            "Hour": np.int64,
            "Minute": np.int64,
            species.lower(): np.float64,
            "Stdev": np.float64,
            "SamplingHeight": np.float64,
            "NbPoints": np.int64,
        }
data = read_csv(
data_filepath,
skiprows=n_skip,
parse_dates=datetime_columns,
index_col="time",
sep=" ",
usecols=use_cols,
dtype=dtypes,
na_values="-999.99",
date_parser=date_parser,
)
data = data[data[species.lower()] >= 0.0]
# Drop duplicate indices
data = data.loc[~data.index.duplicated(keep="first")]
# Check if the index is sorted
        if not data.index.is_monotonic_increasing:
            data = data.sort_index()  # sort_index returns a new object; assign it back
rename_dict = {
"Stdev": species + " variability",
"NbPoints": species + " number_of_observations",
}
data = data.rename(columns=rename_dict)
        # Convert to xarray Dataset
data = data.to_xarray()
combined_data = {}
site_attributes = {}
# Read some metadata from the filename
split_filename = data_filepath.name.split(".")
try:
site = split_filename[0]
time_resolution = split_filename[2]
inlet_height = split_filename[3]
        except IndexError:  # list indexing raises IndexError, not KeyError
raise ValueError("Unable to read metadata from filename. We expect a filename such as tta.co2.1minute.222m.dat")
metadata = {
"site": site,
"species": species,
"inlet": inlet_height,
"time_resolution": time_resolution,
"network": "ICOS",
}
combined_data[species] = {
"metadata": metadata,
"data": data,
"attributes": site_attributes,
}
return combined_data
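# Illustrative usage (hedged sketch; the file name and site code are placeholders that
# depend on the local data layout, and the class is normally imported from HUGS):
#
#   processed = ICOS().read_file("tta.co2.1minute.222m.dat", site="tta")
#   processed["co2"]["data"]       # xarray Dataset with co2, variability and observation counts
#   processed["co2"]["metadata"]   # {'site': 'tta', 'species': 'co2', 'inlet': '222m', ...}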
|
the-stack_0_9806 | import uuid
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
from tests.common import both_channels, metadata, raise_on_failure
@both_channels
def test_concept_post_get_patch(channel):
stub = service_pb2_grpc.V2Stub(channel)
random_string = uuid.uuid4().hex[:15]
random_concept_id = "concept-id-" + random_string
random_concept_name = "concept-name-的な-" + random_string
post_concepts_response = stub.PostConcepts(
service_pb2.PostConceptsRequest(
concepts=[resources_pb2.Concept(id=random_concept_id, name=random_concept_name)]
),
metadata=metadata(),
)
raise_on_failure(post_concepts_response)
get_concepts_response = stub.GetConcept(
service_pb2.GetConceptRequest(concept_id=random_concept_id), metadata=metadata()
)
raise_on_failure(get_concepts_response)
assert get_concepts_response.concept.id == random_concept_id
assert get_concepts_response.concept.name == random_concept_name
duplicated_post_concepts_response = stub.PostConcepts(
service_pb2.PostConceptsRequest(
concepts=[
resources_pb2.Concept(
id=random_concept_id,
)
]
),
metadata=metadata(),
)
assert (
duplicated_post_concepts_response.status.code
== status_code_pb2.StatusCode.CONCEPTS_INVALID_REQUEST
)
assert duplicated_post_concepts_response.status.description == "Invalid request"
assert "duplicate" in duplicated_post_concepts_response.status.details.lower()
post_concepts_searches_response = stub.PostConceptsSearches(
service_pb2.PostConceptsSearchesRequest(
concept_query=resources_pb2.ConceptQuery(name=random_concept_name)
),
metadata=metadata(),
)
raise_on_failure(post_concepts_searches_response)
assert random_concept_name in post_concepts_searches_response.concepts[0].name
patch_concepts_response = stub.PatchConcepts(
service_pb2.PatchConceptsRequest(
action="overwrite",
concepts=[resources_pb2.Concept(id=random_concept_id, name="some new concept name")],
),
metadata=metadata(),
)
raise_on_failure(patch_concepts_response)
@both_channels
def test_patching_public_concept_fails(channel):
stub = service_pb2_grpc.V2Stub(channel)
patch_concepts_searches_response = stub.PatchConcepts(
service_pb2.PatchConceptsRequest(
action="overwrite",
concepts=[
resources_pb2.Concept(
id="ai_98Xb0K3q", # The ID of a public concept.
name="this new name won't be applied",
)
],
),
metadata=metadata(),
)
assert (
patch_concepts_searches_response.status.code
== status_code_pb2.StatusCode.CONN_DOES_NOT_EXIST
)
assert patch_concepts_searches_response.status.description == "Resource does not exist"
|
the-stack_0_9807 | import bpy
import os
D = bpy.data
"""
This little script prints, in the Blender console, the texture paths
that do not conform to the reference one.
"""
searchPath = r"partOfTextureName"
print("+++ search paths +++")
for img in D.images:
if img.filepath.endswith(searchPath) or img.name.endswith(searchPath):
print("found {} {}".format(img.name, img.filepath))
print("---")
for mat in bpy.data.materials:
if mat.node_tree is not None and len(mat.node_tree.nodes) > 0:
nodes = mat.node_tree.nodes
for node in nodes:
if type(node).__name__ == "ShaderNodeTexImage":
for out in node.outputs:
text_img = node.image
if text_img is not None and searchPath in text_img.filepath:
print("found on: {} > {} {}".format(mat.name, text_img.name, text_img.filepath)) |
the-stack_0_9808 | # This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.
from contextlib import contextmanager
import math
from six import BytesIO
from nose.tools import assert_raises
from zs import ZS, ZSWriter, ZSError
from zs.common import write_length_prefixed
from .util import tempname
# some of these helpers also used in test_cmdline to test 'make'
# Each of these records is 25 bytes long
records = []
# just in case of bugs, let's make sure to have an empty record
records.append(b"")
for i in range(1000):
records.append((u"THIS IS RECORD # %08i" % (i,)).encode("utf-8"))
# and a duplicate record
records.append(b"ZZZ THIS RECORD IS REPEATED")
records.append(b"ZZZ THIS RECORD IS REPEATED")
def ok_zs(p):
z = ZS(p)
z.validate()
return z
def temp_zs_path():
return tempname(".zs", unlink_first=True)
@contextmanager
def temp_writer(**kwargs):
with temp_zs_path() as p:
kwargs.setdefault("metadata", {})
kwargs.setdefault("branching_factor", 2)
with ZSWriter(p, **kwargs) as zw:
yield (p, zw)
def identity(x):
return x
def test_add_data_block():
with temp_writer() as (p, zw):
zw.add_data_block([b"a", b"b"])
zw.add_data_block([b"c", b"z"])
zw.finish()
with ok_zs(p) as z:
z.validate()
assert list(z.block_map(identity)) == [[b"a", b"b"],
[b"c", b"z"]]
def test_write_add_file_contents_terminator():
for terminator in [b"\n", b"\x00", b"\r\n"]:
f = BytesIO(terminator.join(records) + terminator)
with temp_writer() as (p, zw):
kwargs = {}
if terminator != b"\n":
kwargs["terminator"] = terminator
# approximately 4 records per data block
zw.add_file_contents(f, 100, **kwargs)
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
assert len(list(z.block_map(identity))) > len(records) / 5.0
def test_write_add_file_contents_length_prefixed():
for mode in ["uleb128", "u64le"]:
f = BytesIO()
write_length_prefixed(f, records, mode)
with temp_writer() as (p, zw):
# approximately 4 records per data block
zw.add_file_contents(BytesIO(f.getvalue()), 100,
length_prefixed=mode)
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
assert len(list(z.block_map(identity))) > len(records) / 5.0
def test_write_mixed():
with temp_writer() as (p, zw):
zw.add_data_block([b"a", b"b"])
f = BytesIO(b"c\nd\n")
zw.add_file_contents(f, 10)
zw.add_data_block([b"e", b"f"])
f = BytesIO(b"\x01g\x01h")
zw.add_file_contents(f, 10, length_prefixed="uleb128")
zw.finish()
with ok_zs(p) as z:
assert list(z) == [b"a", b"b", b"c", b"d", b"e", b"f", b"g", b"h"]
def test_writer_args():
with temp_zs_path() as p:
zw = ZSWriter(p, {"a": 1}, 2, parallelism=2, codec="deflate",
codec_kwargs={"compress_level": 3},
show_spinner=False, include_default_metadata=False)
try:
zw.add_data_block([b"a", b"b"])
zw.add_data_block([b"c", b"d"])
zw.finish()
finally:
zw.close()
with ok_zs(p) as z:
assert z.metadata == {"a": 1}
assert z.codec == "deflate"
def test_no_overwrite():
with temp_zs_path() as p:
f = open(p, "wb")
f.write(b"hi\n")
f.close()
assert_raises(ZSError, ZSWriter, p, {}, 2)
def test_bad_codec():
with temp_zs_path() as p:
assert_raises(ZSError, ZSWriter, p, {}, 2, codec="SUPERZIP")
def test_trailing_record():
with temp_writer() as (p, zw):
assert_raises(ZSError, zw.add_file_contents,
BytesIO(b"a\nb\nc"), 2)
def test_from_file_terminator_long_record():
with temp_writer() as (p, zw):
# individual records longer than the approx_block_size
records = [b"a" * 100, b"b" * 100]
f = BytesIO(b"\n".join(records + [b""]))
zw.add_file_contents(f, 10)
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
def test_from_file_length_prefixed_exactly_one_block():
with temp_writer() as (p, zw):
zw.add_file_contents(BytesIO(b"\x08aaaaaaaa\x04bbbb"), 10,
length_prefixed="uleb128")
zw.finish()
with ok_zs(p) as z:
assert list(z) == [b"a" * 8, b"b" * 4]
def test_closed_is_closed():
with temp_writer() as (_, zw):
zw.close()
assert_raises(ZSError, zw.add_file_contents, BytesIO(b""), 100)
assert_raises(ZSError, zw.add_data_block, [b""])
assert_raises(ZSError, zw.finish)
def test_empty():
with temp_writer() as (_, zw):
assert_raises(ZSError, zw.finish)
# empty blocks are silently dropped instead of being added
def test_no_empty_blocks():
with temp_writer() as (p, zw):
zw.add_data_block([b"a", b"b"])
zw.add_data_block([])
zw.add_file_contents(BytesIO(), 100)
zw.add_data_block([b"c", b"d"])
zw.finish()
# the implicit call to z.validate() here should error out if there are
# any empty blocks, but let's check anyway.
with ok_zs(p) as z:
assert len(list(z.block_map(identity))) == 2
def test_unsorted():
with temp_writer() as (_, zw):
with assert_raises(ZSError):
zw.add_file_contents(BytesIO(b"b\na\n"), 100)
zw.finish()
assert zw.closed
with temp_writer() as (_, zw):
with assert_raises(ZSError):
zw.add_data_block([b"b", b"a"])
zw.finish()
assert zw.closed
with temp_writer() as (_, zw):
with assert_raises(ZSError):
zw.add_data_block([b"m", b"n"])
zw.add_data_block([b"a", b"b"])
zw.finish()
assert zw.closed
def test_lengths():
# exercise all the corner cases in the index packing code
for num_blocks in range(1, 2 ** 5):
for branching_factor in [2, 3]:
block_tmpls = [(u"%04i" % (i,)).encode("utf-8")
for i in range(num_blocks)]
records = []
with temp_writer(branching_factor=branching_factor) as (p, zw):
for block_tmpl in block_tmpls:
block = [block_tmpl + suffix
for suffix in [b"a", b"b", b"c"]]
zw.add_data_block(block)
records += block
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
assert (max(math.ceil(math.log(num_blocks)
/ math.log(branching_factor)),
1)
== z.root_index_level)
def test_clogged_queue():
# Failure to sort across blocks causes an error in the write worker, which
# then stops consuming from its queue. But we don't see it immediately,
# because the main process doesn't regularly check for errors. Eventually
# this causes the whole pipeline to stall. This tests that the main
# process eventually checks for errors under these conditions.
with temp_writer() as (p, zw):
zw.add_data_block([b"z"])
with assert_raises(ZSError):
while True:
zw.add_data_block([b"a"])
# Regression test: had a bug where a empty terminated chunk would cause
# alloc_hint=0 and trigger an infinite loop in pack_data_records.
def test_short_file():
with temp_writer() as (p, zw):
zw.add_file_contents(BytesIO(b"\n"), 128 * 2 ** 10)
zw.finish()
with ok_zs(p) as z:
assert list(z) == [b""]
|
the-stack_0_9809 | from os import path, rename, remove
import pytz
import datetime as dt
from flask import current_app
from flask_restful import request, Resource
from flask_apispec import marshal_with
from flask_jwt_extended import jwt_required
from webargs.flaskparser import use_kwargs
from werkzeug.utils import secure_filename
from run4it.api.templates import report_error_and_abort
from run4it.api.profile.auth_helper import get_auth_profile_or_abort
from run4it.app.database import db
from .model import Workout, WorkoutCategory as WorkoutCategoryModel
from .schema import workout_schema, workouts_schema, workout_update_schema, workout_categories_schema
from .gmaps import GeoCodeLookup
def is_valid_workout_filename(filename):
if filename is not None and filename != "":
allowed_extensions = current_app.config["ALLOWED_UPLOAD_EXTENSIONS"]
return "." in filename and filename.rsplit('.', 1)[1].lower() in allowed_extensions
else:
return False
def save_uploaded_file_or_abort(uploaded_file, profile_name):
filename = secure_filename("{0}_{1}".format(profile_name, uploaded_file.filename))
filepath = path.join(current_app.config["GPX_UPLOAD_DIR"], filename)
try:
uploaded_file.save(filepath)
except:
        report_error_and_abort(422, "workout", "Workout file could not be saved.")
return filepath
def get_autogenerated_workout_name(latitude, longitude, category_name):
try:
print("get_autogenerated_workout_name, {0},{1}".format(latitude, longitude))
geoLookupClient = GeoCodeLookup()
place_name = geoLookupClient.get_name_of_place(latitude, longitude)
print("place_name:", place_name)
if place_name != "":
return "{0} {1}".format(place_name, category_name)
else:
return category_name
except Exception as e:
print(e)
return category_name
def rename_uploaded_file(tmp_filepath, profile_name, workout_id):
# no try/except intentionally here, as we call this within a try block, and want to crash if rename fails :o)
filepath = ""
if is_valid_workout_filename(tmp_filepath):
extension = tmp_filepath.rsplit('.', 1)[1].lower()
filename = "{0}_workout_{1}.{2}".format(profile_name, workout_id, extension)
filepath = path.join(current_app.config["GPX_UPLOAD_DIR"], filename)
rename(tmp_filepath, filepath)
return filepath
def remove_uploaded_file(filepath):
if is_valid_workout_filename(filepath):
try:
remove(filepath)
except:
pass
def add_workout_data_to_goals(profile, workout):
goals = profile.get_active_goals(workout.start_at) # active 'as-we-speak'
if goals is not None:
for goal in goals:
goal.update_from_workout(workout)
def remove_workout_data_from_goals(profile, workout):
goals = profile.get_active_goals(workout.start_at) # active 'as-we-speak'
if goals is not None:
for goal in goals:
goal.remove_from_workout(workout)
class ProfileWorkoutList(Resource):
@jwt_required
@use_kwargs(workout_schema, error_status_code = 422, locations={"query"})
@marshal_with(workouts_schema)
def get(self, username, goal_id=None, limit=10, offset=0):
profile = get_auth_profile_or_abort(username, "workout")
if goal_id is None:
return profile.get_workouts(limit, offset)
goal = profile.get_goal_by_id(goal_id)
if goal is None:
report_error_and_abort(422, "workout", "Goal not found.")
return Workout.get_workouts_for_goal(goal)
@jwt_required
@use_kwargs(workout_update_schema, error_status_code = 422)
@marshal_with(workout_schema)
def post(self, username, name, start_at, distance, duration, category_id, climb=0, edited=False):
profile = get_auth_profile_or_abort(username, "workout")
category = WorkoutCategoryModel.get_by_id(category_id)
if category is None:
report_error_and_abort(422, "workout", "Workout category not found")
if name is None or name == "":
name = category.name
utc_start_at = start_at - start_at.utcoffset()
now = dt.datetime.utcnow().replace(tzinfo=pytz.UTC)
if utc_start_at > now:
report_error_and_abort(422, "workout", "Workout start time is in the future")
try:
new_workout = Workout(profile.id, category, name, utc_start_at, distance, duration, climb, None, edited)
new_workout.save()
add_workout_data_to_goals(profile, new_workout)
except:
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to create workout.")
return new_workout, 200, {'Location': '{}/{}'.format(request.path, new_workout.id)}
class ProfileWorkout(Resource):
@jwt_required
@marshal_with(workout_schema)
def get(self, username, workout_id):
profile = get_auth_profile_or_abort(username, "workout")
workout = profile.get_workout_by_id(workout_id)
if workout is None:
report_error_and_abort(404, "workout", "Workout not found.")
if workout.category.supports_gps_data:
workout.register_extended_data()
return workout
@jwt_required
@use_kwargs(workout_update_schema, error_status_code = 422)
@marshal_with(workout_schema)
def put(self, username, workout_id, name, start_at, distance, duration, category_id, climb=None, edited=None):
profile = get_auth_profile_or_abort(username, "workout")
workout = profile.get_workout_by_id(workout_id)
if workout is None:
report_error_and_abort(422, "workout", "Workout not found")
category = WorkoutCategoryModel.get_by_id(category_id)
if category is None:
report_error_and_abort(422, "workout", "Workout category not found")
if name is None or name == "":
name = category.name
utc_start_at = start_at - start_at.utcoffset()
now = dt.datetime.utcnow().replace(tzinfo=pytz.UTC)
if utc_start_at > now:
report_error_and_abort(422, "workout", "Workout start time is in the future")
# remove data from goal before registering updated
try:
remove_workout_data_from_goals(profile, workout)
except:
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to update workout")
# update category
workout.category = category
workout.name = name
workout.start_at = utc_start_at
workout.distance = distance
workout.duration = duration
if climb is not None:
workout.climb = climb
if edited is not None:
workout.edited = edited
try:
workout.save()
add_workout_data_to_goals(profile, workout)
except:
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to update workout")
return workout, 200
class ProfileWorkoutGpx(Resource): # both TCX and GPX are supported
@jwt_required
@marshal_with(workout_schema)
def post(self, username, category_id):
profile = get_auth_profile_or_abort(username, "workout")
category = WorkoutCategoryModel.get_by_id(category_id)
if category is None:
report_error_and_abort(422, "workout", "Workout category not found")
if request.files is None or len(request.files) != 1 or request.files["gpxfile"] is None:
report_error_and_abort(422, "workout", "Workout file not provided.")
uploaded_file = request.files["gpxfile"]
if not is_valid_workout_filename(uploaded_file.filename):
report_error_and_abort(422, "workout", "Workout filename invalid.")
tmp_filepath = save_uploaded_file_or_abort(uploaded_file, profile.username)
# create object with temporary data and use it to parse workout file
new_workout = Workout(profile.id, category, category.name, dt.datetime.utcnow(), 0, 1, 0, tmp_filepath, False)
new_workout.register_extended_data()
parsed_summary = new_workout.extended_summary
if parsed_summary is None:
remove_uploaded_file(tmp_filepath)
db.session.rollback()
report_error_and_abort(422, "workout", "Failed to parse uploaded file")
new_workout.name = get_autogenerated_workout_name(parsed_summary.latitude, parsed_summary.longitude, new_workout.category_name)
new_workout.start_at = parsed_summary.time
new_workout.duration = parsed_summary.duration
if category.supports_gps_data:
new_workout.distance = parsed_summary.distance
new_workout.climb = parsed_summary.elevation
workout_filepath = None
try:
new_workout.save()
workout_filepath = rename_uploaded_file(tmp_filepath, profile.username, new_workout.id)
new_workout.resource_path = workout_filepath
new_workout.save()
add_workout_data_to_goals(profile, new_workout)
except:
remove_uploaded_file(tmp_filepath)
remove_uploaded_file(workout_filepath)
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to create workout from file.")
return new_workout, 200, {'Location': '{}/{}'.format(request.path, new_workout.id)}
class WorkoutCategoryList(Resource):
@marshal_with(workout_categories_schema)
def get(self):
return WorkoutCategoryModel.query.order_by(WorkoutCategoryModel.name.asc()).all()
|
the-stack_0_9810 | # -*- coding: future_fstrings -*-
from .. import loader, utils
import logging, asyncio
logger = logging.getLogger(__name__)
def register(cb):
cb(AFKMod())
class AFKMod(loader.Module):
"""Provides a message saying that you are unavailable (out of office)"""
def __init__(self):
self.commands = {"afk":self.afkcmd, "unafk":self.unafkcmd}
self.config = {}
self.name = "AFK"
self._me = None
self._ratelimit = []
async def client_ready(self, client, db):
self._db = db
self._me = await client.get_me()
async def afkcmd(self, message):
""".afk [message]
If no message is provided, 'I'm AFK' will be used as default"""
if utils.get_args_raw(message):
self._db.set(__name__, "afk", utils.get_args_raw(message))
else:
self._db.set(__name__, "afk", True)
await message.edit("<code>I'm AFK</code>")
async def unafkcmd(self, message):
"""Remove the AFK status"""
self._ratelimit.clear()
self._db.set(__name__, "afk", False)
await message.edit("<code>I'm no longer AFK</code>")
async def watcher(self, message):
if message.mentioned or getattr(message.to_id, 'user_id', None) == self._me.id:
logger.debug("tagged!")
if message.from_id in self._ratelimit:
self._ratelimit.remove(message.from_id)
return
else:
self._ratelimit += [message.from_id]
user = await utils.get_user(message)
if user.is_self or user.bot or user.verified:
logger.debug("User is self, bot or verified.")
return
if self.get_afk() == True:
await message.reply("<code>I'm AFK!</code>")
elif self.get_afk() != False:
await message.reply(f"<code>{utils.escape_html(self.get_afk())}</code>")
def get_afk(self):
return self._db.get(__name__, "afk", False)
|
the-stack_0_9811 | from datetime import datetime
from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import redirect
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///books.db'
db = SQLAlchemy(app)
class Publisher(db.Model):
pub_id = db.Column(db.Integer, primary_key=True)
pub_name = db.Column(db.String(20))
address = db.Column(db.String(20))
class Book(db.Model):
book_id = db.Column(db.Integer, primary_key = True)
book_name = db.Column(db.String(60))
author = db.Column(db.String(20))
# available
pub_id = db.Column(db.Integer, db.ForeignKey(Publisher.pub_id))
def __repr__(self):
return '<Books %r>' % self.book_id
class Member(db.Model):
mem_id = db.Column(db.Integer, primary_key=True)
mem_name = db.Column(db.String(20))
# join_date = db.Column(db.DateTime, default = datetime.utcnow)
Borrowed = db.Table('borrowed',
db.Column('book_id', db.Integer, db.ForeignKey(Book.book_id)),
db.Column('issue_date', db.DateTime, default = datetime.utcnow)
# due_date
)
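# Hedged sketch of how a borrow record could be written to the association table above
# (not part of the original app; assumes an application context and an existing Book row):
#
#   db.session.execute(Borrowed.insert().values(book_id=some_book.book_id))
#   db.session.commit()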
@app.route('/', methods = ['POST', 'GET'])
def index():
return render_template('index.html')
@app.route('/collection', methods = ['POST', 'GET'])
def collection():
books = Book.query.order_by(Book.book_id).all()
return render_template('collection.html', books = books)
@app.route('/addbook', methods = ['POST', 'GET'])
def addbook():
if request.method == 'POST':
b_name = request.form['book_name']
a_name = request.form['author']
new_book = Book(book_name = b_name, author = a_name)
try:
db.session.add(new_book)
db.session.commit()
return redirect('/')
except:
return 'There was an error adding your book'
else:
books = Book.query.order_by(Book.book_id).all()
return render_template('addbook.html', books = books)
@app.route('/returnbook', methods = ['POST', 'GET'])
def returnbook():
if request.method == 'POST':
b_name = request.form['book_name']
a_name = request.form['author']
new_book = Book(book_name = b_name, author = a_name)
try:
db.session.add(new_book)
db.session.commit()
return redirect('/')
except:
return 'There was an error adding your book'
else:
books = Book.query.order_by(Book.book_id).all()
return render_template('returnbook.html', books = books)
@app.route('/member', methods = ['POST', 'GET'])
def member():
if request.method == 'POST':
        m_name = request.form['mem_name']  # look up the posted form field by name, not the Column object
new_mem = Member(mem_name = m_name)
try:
db.session.add(new_mem)
db.session.commit()
return redirect('/')
except:
return 'There was an error adding the member'
else:
return render_template('member.html')
@app.route('/borrow', methods = ['POST', 'GET'])
def borrow():
books = Book.query.order_by(Book.book_id).all()
return render_template('borrow.html', books = books)
@app.route('/delete/<int:book_id>')
def delete(book_id):
book_to_delete = Book.query.get_or_404(book_id)
try:
db.session.delete(book_to_delete)
db.session.commit()
return redirect('/borrow')
except:
return 'There was a problem borrowing that book'
if __name__ == "__main__":
app.run(debug=True) |
the-stack_0_9812 | from src.ArchiveUtility import ArchiveUtility
from src.Downloader import Downloader
from src.FileOrganizer import FileOrganizer
from src.Utils import Utils
def handle_archival():
print("Would you like to use an existing CSV file in the archival of files?")
with_csv = Utils.get_string_input("Yes or no: ", ["YES", "NO"])
directory, csv_path, column_number, destination_number, force, asset_prefix, source_name, directory_prefix = [
None] * 8
if with_csv.upper() == "YES":
csv_path = Utils.get_string_input("Enter the path to your CSV (i.e 'yourdirectory/yourcsv.csv'): ")
column_number = Utils.get_string_input("Enter the column number for the file name's original name: ")
destination_number = Utils.get_string_input("Enter the column number for the file name's expected new name: ")
force = Utils.get_number_input(2,
"Would you like to change the existing files' names or copy them into a new zipped "
"directory? "
"\n1. Existing File Names\n2. Copy It\nEnter Number:")
if force == 1:
force = True
else:
force = False
elif with_csv.upper() == "NO":
asset_prefix = input("Enter the asset prefix to append to the each renamed file (press enter to have none): ")
source_name = input("Enter the source name (press enter to have none):")
directory_prefix = input("Enter the prefix for your altered directories (i.e __archive) (press enter to have "
"none): ")
directory = Utils.get_string_input(
"Enter the path to the directory containing all of the files you want to alter: ")
input("Hit enter when you are ready to run.")
ArchiveUtility(directory, asset_prefix, source_name, directory_prefix, csv_path, column_number,
destination_number, force).run()
def handle_file_organizer():
organization_types = ["Dimension"]
print("How would you like to organize your files?")
for i, o_type in enumerate(organization_types):
print(str(i + 1) + ":" + o_type)
num = Utils.get_number_input(len(organization_types), "Enter Number: ")
selected_type = organization_types[num - 1]
src = Utils.get_string_input("Enter the source directory for all of your files you want to organize: ")
dest = Utils.get_string_input("Enter the destination directory for all of the files you want to organize: ")
run_forever = Utils.get_string_input("Would you like to run this continuously? (Yes or no): ", ["YES", "NO"])
if run_forever.upper() == "YES":
run_forever = True
else:
run_forever = False
input("Hit enter when you are ready to run.")
FileOrganizer(src, dest, selected_type).organize(run_forever)
def handle_dropbox_download():
directory = input("Enter the directory path for your files (i.e /SS 01 FAMILY/TESTIMONIALS/200702): ")
destination = input("Enter the path to the folder that you want the downloaded files in: ")
api_key = input("Enter the API Key needed to access this account: ")
input("Hit enter when you are ready to run.")
Downloader(directory, destination, api_key).run()
def handle_dropbox_file_name_retrieval():
directory = input("Enter the directory path for your files (i.e /SS 01 FAMILY/TESTIMONIALS/200702): ")
api_key = input("Enter the API Key needed to access this account: ")
input("Hit enter when you are ready to run.")
Downloader(directory, None, api_key).get_files()
def run():
modes = {"Archival": handle_archival, "File Organize": handle_file_organizer, "Download":
handle_dropbox_download, "Retrieve File Names": handle_dropbox_file_name_retrieval}
print("Welcome to the Media Utility Tool")
print("What would you like to do?")
for i, mode in enumerate(modes.keys()):
print(str(i + 1) + ":" + mode)
choice = Utils.get_number_input(len(modes), "Enter number: ")
print("You selected: " + str(choice))
mode = modes[list(modes.keys())[choice - 1]]
mode()
if __name__ == '__main__':
run()
|
the-stack_0_9813 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import six
from fuel_mirror.common import utils
from fuel_mirror.tests import base
class DictAsObj(object):
def __init__(self, d):
self.__dict__.update(d)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class TestUtils(base.TestCase):
def test_lists_merge(self):
main = [{"a": 1, "b": 2, "c": 0}, {"a": 2, "b": 3, "c": 1}]
patch = [{"a": 2, "b": 4}, {"a": 3, "b": 5}]
utils.lists_merge(
main,
patch,
key="a"
)
self.assertItemsEqual(
[{"a": 1, "b": 2, "c": 0},
{"a": 2, "b": 4, "c": 1},
{"a": 3, "b": 5}],
main
)
def test_first(self):
self.assertEqual(
1,
utils.first(0, 1, 0),
)
self.assertEqual(
1,
utils.first(None, [], '', 1),
)
self.assertIsNone(
utils.first(None, [], 0, ''),
)
self.assertIsNone(
utils.first(),
)
def test_is_subdict(self):
self.assertFalse(utils.is_subdict({"c": 1}, {"a": 1, "b": 1}))
self.assertFalse(utils.is_subdict({"a": 1, "b": 2}, {"a": 1, "b": 1}))
self.assertFalse(
utils.is_subdict({"a": 1, "b": 1, "c": 2}, {"a": 1, "b": 1})
)
self.assertFalse(
utils.is_subdict({"a": 1, "b": None}, {"a": 1})
)
self.assertTrue(utils.is_subdict({}, {"a": 1}))
self.assertTrue(utils.is_subdict({"a": 1}, {"a": 1, "b": 1}))
self.assertTrue(utils.is_subdict({"a": 1, "b": 1}, {"a": 1, "b": 1}))
@mock.patch("fuel_mirror.common.utils.open")
def test_get_fuel_settings(self, m_open):
m_open().__enter__.side_effect = [
six.StringIO(
'ADMIN_NETWORK:\n'
' ipaddress: "10.20.0.4"\n'
'FUEL_ACCESS:\n'
' user: "test"\n'
' password: "test_pwd"\n',
),
OSError
]
self.assertEqual(
{
"server": "10.20.0.4",
"user": "test",
"password": "test_pwd",
},
utils.get_fuel_settings()
)
self.assertEqual(
{},
utils.get_fuel_settings()
)
@mock.patch("fuel_mirror.common.utils.yaml")
@mock.patch("fuel_mirror.common.utils.open")
def test_load_input_data(self, open_mock, yaml_mock):
data = "$param1: $param2"
open_mock().__enter__().read.return_value = data
v = utils.load_input_data("data.yaml", param1="key", param2="value")
open_mock.assert_called_with("data.yaml", "r")
yaml_mock.load.assert_called_once_with("key: value")
self.assertIs(yaml_mock.load(), v)
|
the-stack_0_9815 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 14:50:35 2018
@author: Ashutosh Verma
"""
'''
Write a program that calculates and prints the value according to the given formula:
Q = Square root of [(2 * C * D)/H]
Following are the fixed values of C and H:
C is 50. H is 30.
D is the variable whose values should be input to your program in a comma-separated sequence.
Example
Let us assume the following comma separated input sequence is given to the program:
100,150,180
The output of the program should be:
18,22,24
'''
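# Worked check for the first sample value above (D = 100):
# Q = sqrt(2 * 50 * 100 / 30) = sqrt(333.33...), which is about 18.26 and
# rounds to 18, matching the first expected output value.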
import math
c=50
h=30
value = []
items=[x for x in input().split(',')]
for d in items:
value.append(str(int(round(math.sqrt(2*c*float(d)/h)))))
print(','.join(value))
|
the-stack_0_9816 | #!/bin/env python
# pylint: disable=E1101, W0201, E1103
# E1101: reference config file variables
# W0201: Don't much around with __init__
# E1103: Use thread members
from __future__ import print_function
from builtins import range, object
import cProfile
import os
import pickle
import pstats
import random
import threading
import time
import unittest
from Utils.PythonVersion import PY3
from WMCore_t.WMSpec_t.TestSpec import testWorkload
from nose.plugins.attrib import attr
from WMComponent.JobCreator.JobCreatorPoller import JobCreatorPoller, capResourceEstimates
from WMCore.Agent.HeartbeatAPI import HeartbeatAPI
from WMCore.DAOFactory import DAOFactory
from WMCore.DataStructs.Run import Run
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.Services.UUIDLib import makeUUID
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.WMSpec.Makers.TaskMaker import TaskMaker
from WMQuality.Emulators import EmulatorSetup
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
from WMQuality.TestInitCouchApp import TestInitCouchApp as TestInit
class JobCreatorTest(EmulatedUnitTestCase):
"""
Test case for the JobCreator
"""
sites = ['T2_US_Florida', 'T2_US_UCSD', 'T2_TW_Taiwan', 'T1_CH_CERN']
def setUp(self):
"""
_setUp_
Setup the database and logging connection. Try to create all of the
WMBS tables. Also, create some dummy locations.
"""
super(JobCreatorTest, self).setUp()
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setSchema(customModules=['WMCore.WMBS', 'WMCore.ResourceControl', 'WMCore.Agent.Database'],
useDefault=False)
self.couchdbname = "jobcreator_t"
self.testInit.setupCouch("%s/jobs" % self.couchdbname, "JobDump")
self.testInit.setupCouch("%s/fwjrs" % self.couchdbname, "FWJRDump")
self.configFile = EmulatorSetup.setupWMAgentConfig()
myThread = threading.currentThread()
self.daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
locationAction = self.daoFactory(classname="Locations.New")
for site in self.sites:
locationAction.execute(siteName=site, pnn=site)
# Create sites in resourceControl
resourceControl = ResourceControl()
for site in self.sites:
resourceControl.insertSite(siteName=site, pnn=site, ceName=site)
resourceControl.insertThreshold(siteName=site, taskType='Processing', maxSlots=10000, pendingSlots=10000)
self.resourceControl = resourceControl
self._setup = True
self._teardown = False
self.testDir = self.testInit.generateWorkDir()
self.cwd = os.getcwd()
# Set heartbeat
self.componentName = 'JobCreator'
self.heartbeatAPI = HeartbeatAPI(self.componentName)
self.heartbeatAPI.registerComponent()
if PY3:
self.assertItemsEqual = self.assertCountEqual
return
def tearDown(self):
"""
_tearDown_
Drop all the WMBS tables.
"""
self.testInit.clearDatabase(modules=['WMCore.WMBS', 'WMCore.ResourceControl', 'WMCore.Agent.Database'])
self.testInit.delWorkDir()
self._teardown = True
self.testInit.tearDownCouch()
EmulatorSetup.deleteConfig(self.configFile)
return
def createJobCollection(self, name, nSubs, nFiles, workflowURL='test'):
"""
_createJobCollection_
Create a collection of jobs
"""
myThread = threading.currentThread()
testWorkflow = Workflow(spec=workflowURL, owner="mnorman",
name=name, task="/TestWorkload/ReReco")
testWorkflow.create()
for sub in range(nSubs):
nameStr = '%s-%i' % (name, sub)
myThread.transaction.begin()
testFileset = Fileset(name=nameStr)
testFileset.create()
for f in range(nFiles):
# pick a random site
site = random.choice(self.sites)
testFile = File(lfn="/lfn/%s/%i" % (nameStr, f), size=1024, events=10)
testFile.setLocation(site)
testFile.create()
testFileset.addFile(testFile)
testFileset.commit()
testSubscription = Subscription(fileset=testFileset,
workflow=testWorkflow,
type="Processing",
split_algo="FileBased")
testSubscription.create()
myThread.transaction.commit()
return
def createWorkload(self, workloadName='Test'):
"""
_createTestWorkload_
Creates a test workload for us to run on, hold the basic necessities.
"""
workload = testWorkload(workloadName)
rereco = workload.getTask("ReReco")
seederDict = {"generator.initialSeed": 1001, "evtgenproducer.initialSeed": 1001}
rereco.addGenerator("PresetSeeder", **seederDict)
taskMaker = TaskMaker(workload, os.path.join(self.testDir, 'workloadTest'))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
return workload
def getConfig(self):
"""
_getConfig_
Creates a common config.
"""
config = self.testInit.getConfiguration()
self.testInit.generateWorkDir(config)
# First the general stuff
config.section_("General")
config.General.workDir = os.getenv("TESTDIR", os.getcwd())
config.section_("Agent")
config.Agent.componentName = self.componentName
# Now the CoreDatabase information
# This should be the dialect, dburl, etc
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = os.getenv("DATABASE")
config.CoreDatabase.socket = os.getenv("DBSOCK")
config.component_("JobCreator")
config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'
# The log level of the component.
# config.JobCreator.logLevel = 'SQLDEBUG'
config.JobCreator.logLevel = 'INFO'
# maximum number of threads we want to deal
# with messages per pool.
config.JobCreator.maxThreads = 1
config.JobCreator.UpdateFromResourceControl = True
config.JobCreator.pollInterval = 10
# config.JobCreator.jobCacheDir = self.testDir
config.JobCreator.defaultJobType = 'processing' # Type of jobs that we run, used for resource control
config.JobCreator.workerThreads = 4
config.JobCreator.componentDir = self.testDir
config.JobCreator.useWorkQueue = True
config.JobCreator.WorkQueueParams = {'emulateDBSReader': True}
# We now call the JobMaker from here
config.component_('JobMaker')
config.JobMaker.logLevel = 'INFO'
config.JobMaker.namespace = 'WMCore.WMSpec.Makers.JobMaker'
config.JobMaker.maxThreads = 1
config.JobMaker.makeJobsHandler = 'WMCore.WMSpec.Makers.Handlers.MakeJobs'
# JobStateMachine
config.component_('JobStateMachine')
config.JobStateMachine.couchurl = os.getenv('COUCHURL', 'cmssrv52.fnal.gov:5984')
config.JobStateMachine.couchDBName = self.couchdbname
return config
def testVerySimpleTest(self):
"""
_VerySimpleTest_
Just test that everything works...more or less
"""
# return
myThread = threading.currentThread()
config = self.getConfig()
name = makeUUID()
nSubs = 5
nFiles = 10
workloadName = 'TestWorkload'
dummyWorkload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
testJobCreator = JobCreatorPoller(config=config)
# First, can we run once without everything crashing?
testJobCreator.algorithm()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), nSubs * nFiles)
# Count database objects
result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
self.assertEqual(len(result), nSubs * nFiles)
# Find the test directory
testDirectory = os.path.join(self.testDir, 'jobCacheDir', 'TestWorkload', 'ReReco')
# It should have at least one jobGroup
self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))
# But no more then twenty
self.assertTrue(len(os.listdir(testDirectory)) <= 20)
groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')
# First job should be in here
listOfDirs = []
for tmpDirectory in os.listdir(testDirectory):
listOfDirs.extend(os.listdir(os.path.join(testDirectory, tmpDirectory)))
self.assertTrue('job_1' in listOfDirs)
self.assertTrue('job_2' in listOfDirs)
self.assertTrue('job_3' in listOfDirs)
jobDir = os.listdir(groupDirectory)[0]
jobFile = os.path.join(groupDirectory, jobDir, 'job.pkl')
self.assertTrue(os.path.isfile(jobFile))
f = open(jobFile, 'rb')
job = pickle.load(f)
f.close()
self.assertEqual(job.baggage.PresetSeeder.generator.initialSeed, 1001)
self.assertEqual(job.baggage.PresetSeeder.evtgenproducer.initialSeed, 1001)
self.assertEqual(job['workflow'], name)
self.assertEqual(len(job['input_files']), 1)
self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')
return
@attr('performance', 'integration')
def testProfilePoller(self):
"""
Profile your performance
You shouldn't be running this normally because it doesn't do anything
"""
name = makeUUID()
nSubs = 5
nFiles = 1500
workloadName = 'TestWorkload'
workload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
config = self.getConfig()
testJobCreator = JobCreatorPoller(config=config)
cProfile.runctx("testJobCreator.algorithm()", globals(), locals(), filename="testStats.stat")
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
time.sleep(10)
self.assertEqual(len(result), nSubs * nFiles)
p = pstats.Stats('testStats.stat')
p.sort_stats('cumulative')
p.print_stats(.2)
return
@attr('integration')
def testProfileWorker(self):
"""
Profile where the work actually gets done
You shouldn't be running this one either, since it doesn't test anything.
"""
name = makeUUID()
nSubs = 5
nFiles = 500
workloadName = 'TestWorkload'
workload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
config = self.getConfig()
configDict = {"couchURL": config.JobStateMachine.couchurl,
"couchDBName": config.JobStateMachine.couchDBName,
'jobCacheDir': config.JobCreator.jobCacheDir,
'defaultJobType': config.JobCreator.defaultJobType}
subs = [{"subscription": 1}, {"subscription": 2}, {"subscription": 3}, {"subscription": 4},
{"subscription": 5}]
testJobCreator = JobCreatorPoller(**configDict)
cProfile.runctx("testJobCreator.algorithm(parameters = input)", globals(), locals(), filename="workStats.stat")
p = pstats.Stats('workStats.stat')
p.sort_stats('cumulative')
p.print_stats(.2)
return
@attr('integration')
def testHugeTest(self):
"""
Don't run this one either
"""
myThread = threading.currentThread()
config = self.getConfig()
name = makeUUID()
nSubs = 10
nFiles = 5000
workloadName = 'Tier1ReReco'
dummyWorkload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
testJobCreator = JobCreatorPoller(config=config)
# First, can we run once without everything crashing?
startTime = time.time()
testJobCreator.algorithm()
stopTime = time.time()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), nSubs * nFiles)
print("Job took %f seconds to run" % (stopTime - startTime))
# Count database objects
result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
self.assertEqual(len(result), nSubs * nFiles)
return
def stuffWMBS(self, workflowURL, name):
"""
_stuffWMBS_
Insert some dummy jobs, jobgroups, filesets, files and subscriptions
into WMBS to test job creation. Three completed job groups each
containing several files are injected. Another incomplete job group is
also injected. Also files are added to the "Mergeable" subscription as
well as to the output fileset for their jobgroups.
"""
locationAction = self.daoFactory(classname="Locations.New")
locationAction.execute(siteName="s1", pnn="somese.cern.ch")
mergeFileset = Fileset(name="mergeFileset")
mergeFileset.create()
bogusFileset = Fileset(name="bogusFileset")
bogusFileset.create()
mergeWorkflow = Workflow(spec=workflowURL, owner="mnorman",
name=name, task="/TestWorkload/ReReco")
mergeWorkflow.create()
mergeSubscription = Subscription(fileset=mergeFileset,
workflow=mergeWorkflow,
split_algo="ParentlessMergeBySize")
mergeSubscription.create()
dummySubscription = Subscription(fileset=bogusFileset,
workflow=mergeWorkflow,
split_algo="ParentlessMergeBySize")
file1 = File(lfn="file1", size=1024, events=1024, first_event=0,
locations={"somese.cern.ch"})
file1.addRun(Run(1, *[45]))
file1.create()
file2 = File(lfn="file2", size=1024, events=1024, first_event=1024, locations={"somese.cern.ch"})
file2.addRun(Run(1, *[45]))
file2.create()
file3 = File(lfn="file3", size=1024, events=1024, first_event=2048, locations={"somese.cern.ch"})
file3.addRun(Run(1, *[45]))
file3.create()
file4 = File(lfn="file4", size=1024, events=1024, first_event=3072, locations={"somese.cern.ch"})
file4.addRun(Run(1, *[45]))
file4.create()
fileA = File(lfn="fileA", size=1024, events=1024, first_event=0, locations={"somese.cern.ch"})
fileA.addRun(Run(1, *[46]))
fileA.create()
fileB = File(lfn="fileB", size=1024, events=1024, first_event=1024, locations={"somese.cern.ch"})
fileB.addRun(Run(1, *[46]))
fileB.create()
fileC = File(lfn="fileC", size=1024, events=1024, first_event=2048, locations={"somese.cern.ch"})
fileC.addRun(Run(1, *[46]))
fileC.create()
fileI = File(lfn="fileI", size=1024, events=1024, first_event=0, locations={"somese.cern.ch"})
fileI.addRun(Run(2, *[46]))
fileI.create()
fileII = File(lfn="fileII", size=1024, events=1024, first_event=1024, locations={"somese.cern.ch"})
fileII.addRun(Run(2, *[46]))
fileII.create()
fileIII = File(lfn="fileIII", size=1024, events=1024, first_event=2048, locations={"somese.cern.ch"})
fileIII.addRun(Run(2, *[46]))
fileIII.create()
fileIV = File(lfn="fileIV", size=1024 * 1000000, events=1024, first_event=3072, locations={"somese.cern.ch"})
fileIV.addRun(Run(2, *[46]))
fileIV.create()
for fileObj in [file1, file2, file3, file4, fileA, fileB, fileC, fileI, fileII, fileIII, fileIV]:
mergeFileset.addFile(fileObj)
bogusFileset.addFile(fileObj)
mergeFileset.commit()
bogusFileset.commit()
return
def testTestNonProxySplitting(self):
"""
_TestNonProxySplitting_
Test and see if we can split things without a proxy.
"""
config = self.getConfig()
config.JobCreator.workerThreads = 1
name = makeUUID()
workloadName = 'TestWorkload'
workload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.stuffWMBS(workflowURL=workloadPath, name=name)
testJobCreator = JobCreatorPoller(config=config)
testJobCreator.algorithm()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 1)
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), 0)
return
def testCapResourceEstimates(self):
"""
_testCapResourceEstimates_
Test capResourceEstimates function to make sure the glideinwms
constraints are being properly considered.
"""
class JobGroup(object):
"""Dummy object holding a jobs attr full of jobs"""
def __init__(self):
self.jobs = []
constraints = {'MaxRequestDiskKB': 20971520, 'MinRequestDiskKB': 1048576,
'MaxWallTimeSecs': 162000, 'MinWallTimeSecs': 3600}
jobGroups = []
jobGroup = JobGroup()
jobGroup.jobs.append({'estimatedJobTime': None, 'estimatedDiskUsage': None})
jobGroup.jobs.append({'estimatedJobTime': 0, 'estimatedDiskUsage': 0})
jobGroup.jobs.append({'estimatedJobTime': 10000, 'estimatedDiskUsage': 10 * 1000 * 1000})
jobGroup.jobs.append({'estimatedJobTime': 200000, 'estimatedDiskUsage': 100 * 1000 * 1000})
jobGroups.append(jobGroup)
capResourceEstimates(jobGroups, constraints)
self.assertItemsEqual(jobGroup.jobs[0], {'estimatedJobTime': 3600, 'estimatedDiskUsage': 1048576})
self.assertItemsEqual(jobGroup.jobs[1], {'estimatedJobTime': 3600, 'estimatedDiskUsage': 1048576})
self.assertItemsEqual(jobGroup.jobs[2], {'estimatedJobTime': 10000, 'estimatedDiskUsage': 10 * 1000 * 1000})
self.assertItemsEqual(jobGroup.jobs[3], {'estimatedJobTime': 162000, 'estimatedDiskUsage': 20971520})
return
if __name__ == "__main__":
unittest.main()
|
the-stack_0_9818 | #!/usr/bin/env python
# Python version 3.4+
import sys
import os
import re
import math
import requests
# Simple ranged download script. For those times when the other end just decides
# to close the file stream and you end up with partial files. This fixes
# that issue.
# -> Requests is required. Use 'pip install requests' to download the module.
# This download script is partially extracted from my Bandcamp downloader,
# Campdown.
# The first argument is the url to download the file from.
# The second argument is the optional output folder the file should be written to.
# If none is specified the folder this script is in will be used.
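# Illustrative invocation (the script and folder names here are placeholders,
# not part of the original):
#   python ranged_download.py "https://example.com/archive.zip" ./downloads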
# Check that this file's main function is not being called from another file.
if __name__ == "__main__":
try:
# Fetch the program arguments and make sure that they are valid.
try:
url = sys.argv[1].replace("\"", "")
except:
print("\nMissing required URL argument")
sys.exit(2)
if "http://" not in url and "https://" not in url:
print("\n%s is not a valid URL" % url)
sys.exit(2)
        # Use the optional second argument as the output folder when given;
        # otherwise fall back to the folder this script lives in.
        try:
            folder = sys.argv[2].replace("\"", "")
        except IndexError:
            folder = os.path.split(os.path.abspath(
                __file__).replace("\\", "/"))[0] + "/"

        name = re.findall(r"(?=\w+\.\w{3,4}$).+", url)[0]
# Get the size of the remote file.
full_response = requests.get(url, stream=True)
total_length = full_response.headers.get("content-length")
# Open a file stream which will be used to save the output string
with open(folder + "/" + re.sub("[\\/:*?<>|]", "", name), "wb") as f:
# Make sure that the printed string is compatible with the user"s command line. Else, encode.
# This applies to all other print arguments throughout this file.
try:
print("Downloading: %s" % name)
except UnicodeEncodeError:
try:
print("Downloading: %s" % name.encode(
sys.stdout.encoding, errors="replace").decode())
except UnicodeDecodeError:
print("Downloading: %s" % name.encode(
sys.stdout.encoding, errors="replace"))
# If the file is empty simply write out the returned content from
# the request.
if total_length is None:
f.write(full_response.content)
else:
# Storage variables used while evaluating the already
# downloaded data.
dl = 0
total_length = int(total_length)
cleaned_length = int((total_length * 100) / pow(1024, 2)) / 100
block_size = 2048
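                # Fetch the file in 1 MiB (1048576-byte) slices; each Range
                # header requests bytes [i*1048576, (i+1)*1048576 - 1],
                # inclusive on both ends.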
for i in range(math.ceil(total_length / 1048576)):
response = requests.get(url, headers={
"Range": "bytes=" + str(i * 1048576) + "-" + str((i + 1) * (1048576) - 1)}, stream=True)
for chunk in response.iter_content(chunk_size=block_size):
# Add the length of the chunk to the download size and
# write the chunk to the file.
dl += len(chunk)
f.write(chunk)
# Display a loading bar based on the currently download
# filesize.
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s%s] %sMB / %sMB " % ("=" * done, ">", " " * (
50 - done), (int(((dl) * 100) / pow(1024, 2)) / 100), cleaned_length))
sys.stdout.flush()
# Insert a new line for formatting-OCD's sake.
print("\n")
except (KeyboardInterrupt):
print("Interrupt caught - exiting program...")
sys.exit(2)
|
the-stack_0_9821 | from io import StringIO
import os
import numpy as np
import pandas as pd
from .. import fem_attribute
class ListStringSeries():
def __init__(self, list_string_series):
self._list_string_series = list_string_series
return
def __len__(self):
return len(self._list_string_series)
def __getitem__(self, key):
if isinstance(key, int):
return self._list_string_series[key]
elif isinstance(key, list):
return [self[i] for i in key]
else:
raise ValueError(f"Unexpected key: {key}")
def strip(self):
return [s.strip() for s in self]
def expand_include(self, pattern, base_name):
return [s.expand_include(pattern, base_name) for s in self]
class StringSeries(pd.Series):
def __init__(self, *args, **kw):
if len(args) == 0 or len(args[0]) == 0:
kw['dtype'] = object
super().__init__(*args, **kw)
@property
def _constructor(self):
return StringSeries
@classmethod
def read_file(cls, file_name, *, pattern_ignore=None):
"""Read file and convert to numpy string array.
Args:
file_name: String of file name.
            pattern_ignore: String used to ignore unnecessary lines,
                e.g. comments.
Returns:
            StringSeries object. Each component corresponds to one line of
the input file.
"""
print(f"Reading file: {file_name}")
s = pd.read_csv(
file_name, header=None, index_col=None, sep='@', dtype=str)[0]
# sep='@' because don't want to separate
if pattern_ignore is None:
return cls(s)
else:
return cls(s).find_match(
pattern_ignore, negative_match=True)
@classmethod
def read_files(cls, file_names, *, pattern_ignore=None, separate=False):
"""Read files.
Args:
file_names: Array of strings indicating file names.
            pattern_ignore: String used to ignore unnecessary lines,
                e.g. comments.
separate: bool
If True, return separated contents, namely, ListStringSeries
object.
Returns:
            StringSeries object. Each component corresponds to one line of
input files (contents are concatenated).
"""
if separate:
list_string_series = ListStringSeries([
cls.read_file(file_name, pattern_ignore=pattern_ignore)
for file_name in file_names])
if len(list_string_series) == 1:
return list_string_series[0]
else:
return list_string_series
else:
return cls(pd.concat([
cls.read_file(file_name, pattern_ignore=pattern_ignore)
for file_name in file_names]))
@classmethod
def read_array(cls, _array, *, delimiter=',', str_format=None):
"""Read array to make StringSeries object.
Args:
array: Ndarray or list of NDarray to make StringSeries object.
delimiter: String indicating delimiter to connect components in
a raw (default: ',').
str_format: Format string to be passed to numpy.savetxt.
Returns: StringSeries object after reading arrays.
"""
array = np.asarray(_array)
if str_format is None and 'float' in str(array.dtype):
str_format = '%.8E'
if len(array.shape) == 1:
if str_format is None:
try:
str_array = array.astype(str)
return cls(str_array)
except ValueError:
return cls.read_array(
array[:, None], delimiter=delimiter,
str_format=str_format)
else:
sio = StringIO()
np.savetxt(sio, array, fmt=str_format)
return cls(sio.getvalue().split('\n')[:-1])
elif len(array.shape) == 2 and array.shape[1] == 1:
if str_format is None:
try:
converted_array = array.astype(str)
# Array can be converted to other types
return cls(converted_array[:, 0])
except ValueError:
                    # Array is really an object array
return cls(np.array([
'\n'.join(delimiter.join(a) for a in arr.astype(str))
for arr in array[:, 0]
]))
else:
sio = StringIO()
np.savetxt(sio, array[:, 0], fmt=str_format)
return cls(sio.getvalue().split('\n')[:-1])
elif len(array.shape) > 2:
raise ValueError(f"Too high dimensions: {array.shape}")
else:
pass
a0 = array[:, 0]
if str_format is None:
s = cls(a0.astype(str))
for a in array[:, 1:].T:
s = s.connect(a.astype(str))
else:
sio = StringIO()
np.savetxt(sio, a0, fmt=str_format)
s = cls(sio.getvalue().split('\n')[:-1])
for a in array[:, 1:].T:
sio = StringIO()
np.savetxt(sio, a, fmt=str_format)
s = s.connect(sio.getvalue().split('\n')[:-1])
return s
@classmethod
def connect_all(cls, list_data, delimiter=',', str_format=None):
if len(list_data) == 0:
return cls()
if str_format is None:
str_format = [None] * len(list_data)
elif isinstance(str_format, str):
str_format = [str_format] * len(list_data)
if len(list_data) != len(str_format):
            raise ValueError(
                'When str_format is a list, its length should be '
                'the same as that of list_data '
                f"({len(str_format)} vs {len(list_data)})")
s = cls.read_array(list_data[0], str_format=str_format[0])
for d, f in zip(list_data[1:], str_format[1:]):
s = s.connect(
cls.read_array(d, str_format=f), delimiter=delimiter)
return s
@classmethod
def concat(cls, list_data, axis=0):
return cls(pd.concat(list_data, axis=axis))
def to_header_data(self, pattern):
matches = self.str.match(pattern).values
headers = self[matches]
match_indices = np.concatenate([np.where(matches)[0], [len(self)]])
list_indices = [
range(i1+1, i2) for i1, i2
in zip(match_indices[:-1], match_indices[1:])]
return HeaderData(headers, list_indices, data=self)
# header_dict = {
# header: self[i1+1:i2] for header, i1, i2
# in zip(headers, match_indices[:-1], match_indices[1:])}
# return HeaderData(header_dict)
def strip(self):
return self.str.strip()
def extract_captures(self, pattern, *, convert_values=False):
captures = self.str.extract(pattern, expand=False)
captures = captures[~pd.isnull(captures)]
if convert_values:
return captures.values
else:
return captures
def find_match(self, pattern, *, allow_multiple_matches=True,
convert_values=False, negative_match=False):
"""Find match to the specified pattern.
Args:
pattern: Pattern to be used for matching.
allow_multiple_matches: True to accept several matches.
(Default = True)
            convert_values: Bool, [False]
                Flag to convert StringSeries to values.
Returns:
StringSeries or ndarray of matches.
"""
if negative_match:
match = self[~self.str.contains(pattern)]
else:
match = self[self.str.contains(pattern)]
if not allow_multiple_matches and len(match) > 1:
raise ValueError(f"{len(match)} matches found. Expected 1.")
if convert_values:
return match.values
else:
return match
def expand_include(self, pattern, base_name):
"""Expand data like 'include' statement. Expanded data is concatenated
at the end of the non-expanded data.
Args:
pattern: Pattern showing include statement. Include file should be
captured with the first expression.
base_name: Directory name of the include file location.
Returns:
StringSeries object after expansion.
"""
captures = self.extract_captures(pattern)
include_files = [os.path.join(base_name, c) for c in captures]
if len(include_files) == 0:
return self
include_ss = StringSeries.read_files(include_files)
return pd.concat([self, include_ss], ignore_index=True)
def to_fem_attribute(self, name, id_column, slice_data_columns, *,
data_type=float, delimiter=',',
data_unit='unit_unknown', generate_id2index=False):
"""Generate FEMAttribute object with parsing the series.
Args:
name: String indicating name of the attribute.
lines: Ndarray of strings contains data.
id_column: Int indicating the column of ids.
slice_data_columns: Slice object indicating the columns of data.
data_type: Type of the data (default: float)
delimiter: String of delimiter. (default: ',')
data_unit: String indicating unit of data.
(default: 'unit_unknown')
generate_id2index: bool
If True, generate pandas.DataFrame of IDs and indices.
Returns:
femio.FEMAttribute object.
"""
df = self.str.split(delimiter, expand=True)
ids = df.values[:, id_column].astype(float).astype(int)
data = df.values[:, slice_data_columns].astype(data_type)
return fem_attribute.FEMAttribute(
name, ids, data, data_unit=data_unit,
generate_id2index=generate_id2index)
def to_values(
self, delimiter=',', data_type=float, to_rank1=False,
until_column=None):
"""Delimit StringLines object with the specified delimiter to output
ndarray of the specified data_type.
Args:
delimiter: String of delimiter (default: ',').
data_type: Type of output data (default: float).
to_rank1: Boolean to control output (True: rank-1, False: rank-2,
default: False)
until_column: int, optional, [None]
Read until the specified column.
Returns:
Ndarray of the specified data_type.
"""
data = self.delimit(delimiter).astype(data_type)[:, :until_column]
# except ValueError:
# raise ValueError(self)
if to_rank1:
return np.concatenate(data)
else:
return data
def delimit(self, delimiter=','):
"""Delimit StringLines object with the specified delimiter to output
rank-2 ndarray of strings.
Args:
delimiter: String of delimiter (default: ',').
Returns:
rank-2 ndarray of string.
"""
return self.str.split(delimiter, expand=True).values
def split_vertical(self, index_cut, delimiter=','):
"""Split StringSeries object vertically.
Args:
index_cut: Index (= start index of 2nd obj) to cut the StringLines.
Return:
2-tuple of DataFrame objects after splitting.
"""
if len(self) == 0:
return (pd.DataFrame([]), pd.DataFrame([]))
if index_cut == 0:
pattern = f"([^{delimiter}]*){delimiter}(.*)"
else:
pattern = \
f"((?:[^{delimiter}]*{delimiter}){{{index_cut - 1}}}" \
+ f"[^{delimiter}]*){delimiter}(.*)"
df_split = self.str.extract(pattern, expand=True)
return (StringSeries(df_split[0]), StringSeries(df_split[1]))
def split_vertical_all(self, delimiter=','):
"""Split StringSeries object vertically. Output will be n StringSeries
objects.
        Args:
            delimiter: String of delimiter (default: ',').
        Return:
            n-tuple of StringSeries objects after splitting.
"""
if len(self) == 0:
return (StringSeries([]), )
delimitted_data = self.delimit(delimiter)
return [StringSeries(d.T) for d in delimitted_data.T]
def connect(self, other, delimiter=','):
"""Connect two StringSeries objects with specified delimiter.
Lengths of two objects should be the same.
Args:
other: Other StringSeries object to be connected.
delimiter: String to appear at the connection.
Return:
StringSeries object after connection.
"""
if len(other) == 0:
return self
if len(self) != len(other):
raise ValueError('Dimension different: {} vs {}'.format(
len(self), len(other)))
return StringSeries(self.str.cat(
StringSeries(other).values, sep=delimiter, join='left'))
def indices_match_clusters(self, pattern, *, negative_match=False):
"""Make cluster of indices of matches. Cluster means a group with
continuous indices.
Args:
pattern: Pattern to be used for matching.
Returns:
list of ndarrays containing indices of each cluster.
"""
indices_matches = self.indices_matches(
pattern, negative_match=negative_match)
diff_ind = np.diff(indices_matches)
separation_indices = [i + 1 for i, d in enumerate(diff_ind) if d > 1]
start_indices = [0] + separation_indices
stop_indices = separation_indices + [len(indices_matches)]
return [indices_matches[i1:i2] for i1, i2
in zip(start_indices, stop_indices)]
def indices_matches(self, pattern, *, negative_match=False):
"""Return indices of matched lines.
Args:
pattern: Pattern to be used for matching.
Returns:
Ndarray of ints indicating indices of matched lines.
"""
matches = self.astype(str).str.contains(pattern)
if negative_match:
matches = ~matches
if np.all(~matches):
raise ValueError('No match found for: {}'.format(pattern))
return np.array(range(len(matches)))[matches]
def to_dict_fem_attributes(self, names, component_nums,
data_units=None, delimiter=','):
"""Generate dict of FEMAttribute objects with parsing the lines.
Args:
names: List of strings indicating names of the attributes.
component_nums: List of ints indicating # of components of each
attributes.
data_units: List of strings indicating unit of data.
(default: 'unit_unknown')
Returns:
Dict with key = name, value = fem.FEMAttribute.
"""
if data_units is None:
data_units = ['unit_unknown' for _ in names]
nums = np.concatenate([[0], np.cumsum(component_nums)]) + 1
ranges = [range(n1, n2) for n1, n2 in zip(nums[:-1], nums[1:])]
return {name: self.to_fem_attribute(
name, 0, r, delimiter=delimiter, data_unit=unit)
for name, r, unit in zip(names, ranges, data_units)}
class HeaderData():
def __init__(self, headers, list_indices, data):
if len(headers) != len(list_indices):
raise ValueError(
f"Length different: {len(headers)} vs {len(list_indices)}")
self.dict = data
self.headers = headers
self.list_indices = np.array([
np.array(indices) for indices in list_indices], dtype=object)
self.data = data
def extract_headers(self, key):
return self.headers.find_match(key)
def extract_data(self, key, *, concatenate=True):
indices = self.headers.str.contains(key)
if not np.any(indices):
return StringSeries([])
if concatenate:
concatenated_indices = np.concatenate(
self.list_indices[indices])
return self.data.iloc[concatenated_indices]
else:
return [self.data.iloc[index]
for index in self.list_indices[indices]]
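

# Usage sketch (illustrative only; the sample values are made up for
# demonstration and are not part of the library above). The module is normally
# imported as part of its package, in which case something like the following
# builds StringSeries objects from in-memory arrays and joins them column-wise:
#
#     ids = StringSeries.read_array(np.array([1, 2, 3]))
#     vals = StringSeries.read_array(np.array([0.1, 0.2, 0.3]), str_format='%.8E')
#     ids.connect(vals).values  # e.g. ['1,1.00000000E-01', ...]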
|
the-stack_0_9822 | # -*- coding: utf-8 -*-
import requests, json
import numpy as np
import scipy.interpolate as si
from scipy.optimize import brentq
from functools import partial
_error_msg = {
1: 'Parameter t must be a list or an array that represents knot vector.',
2: 'Method parameter must be one of: "interp", "smooth", "lsq".'
}
def LoadSpline(curve_id_or_url):
url_split = curve_id_or_url.split("/")
if len(url_split) > 1:
url = curve_id_or_url
else:
curve_id = url_split[-1]
if "spl_" not in curve_id or len(curve_id) != 16:
raise ValueError("Wrong curve id was specified")
url = "https://splinecloud.com/api/curves/id/{}".format(curve_id)
response = requests.get(url)
curve = json.loads(response.content)
curve_params = curve['spline']
t = np.array(curve_params['t'])
c = np.array(curve_params['c'])
w = curve_params['w']
tcck = t, c[:, 0], c[:, 1], curve_params['k']
return ParametricUnivariateSpline.from_tcck(tcck)
class PPolyInvertible(si.PPoly):
"""Piecewise polynomial with ability to evaluate inverse dependency x(y)"""
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
# self = super(PPolyInvertible, cls).construct_fast(c, x, extrapolate=extrapolate, axis=axis)
self = super(PPolyInvertible, cls).construct_fast(
c, x, extrapolate=extrapolate)
self.k = len(self.c) - 1
self.powers = np.arange(self.k, -1, -1)
self.intervals = self._form_intervals(self.x)
# self.powers = np.arange(self.k, 0, -1)
return self
@classmethod
def from_splinefunc(cls, spline):
self = cls.from_spline(spline.tck)
self.project_intervals(spline)
return self
def eval_oninterval(self, n, numpoints=50):
coeffs = self.c.T[n + self.k]
tbreak = self.x[n + self.k]
a = self.intervals[n][0]
b = self.intervals[n][1]
tpoints = np.linspace(a, b, numpoints)
ppoints = np.zeros(len(tpoints))
i = 0
for t in tpoints:
ppoints[i] = self.eval_poly(t, coeffs, tbreak)
i += 1
return ppoints
def _get_poly(self, t, n, xvalue=0):
coeffs = self.c.T[n + self.k]
tbreak = self.x[n + self.k]
poly = self.eval_poly(t, coeffs, tbreak)
return poly - xvalue
def eval_poly(self, t, coeffs, tbreak):
# poly = coeffs[0]*(t - tbreak)**3 + coeffs[1]*(t - tbreak)**2 + coeffs[2]*(t - tbreak) + coeffs[3]
poly = 0
for c, p in zip(coeffs, self.powers):
poly += c*(t - tbreak)**p
return poly
def _get_interval(self, coord, intervals):
i = 0
for interval in intervals:
if coord >= interval[0] and coord <= interval[1]:
return i
else:
i += 1
return None
def _form_intervals(self, breaks):
# n = len(breaks) - 1
n = len(breaks) - 2*self.k - 1
intervals = np.zeros((n, 2))
i = self.k
for interval in intervals:
interval[0], interval[1] = breaks[i], breaks[i + 1]
i += 1
return intervals
def project_intervals(self, sf):
breaks = sf(self.x)
self.pintervals = self._form_intervals(breaks)
def _check_monotonous(self, intervals):
check = True
for interval in intervals:
if interval[1] < interval[0]:
check = False
break
return check
def evalinv(self, x):
pinterval = self._get_interval(x, self.pintervals)
if pinterval is not None:
interval = self.intervals[pinterval]
t = brentq(partial(self._get_poly, n=pinterval, xvalue=x),
interval[0], interval[1])
return t
else:
return None
class ParametricUnivariateSpline(object):
"""
One-dimensional parametric spline fit to a given set of data points.
Fits a spline x, y = spl(t) of degree `k` to the provided `x`, `y` data.
If fitting method is set to "interp" spline will interpolate
through all data points.
If fitting method is set to "smooth" then normalized smoothing
factor sn will be used to choose the number of knots.
    Regular smoothing factor s used by the underlying spline functions is
    evaluated from sn as s = sn*smax, where smax is the sum of squared
    weighted residuals of a least-squares spline fit with no interior knots.
If fitting method is set to "lsq" and internal knot vector t is not specified
then uniform knot vector of length nk will be used to create least squares
spline approximation.
"""
def __init__(self, x_data, y_data, t=None, method="interp",
sn=None, k=3, w=None, nk=3, bbox=[None]*2):
self.x_data = x_data
self.y_data = y_data
self.k = k
self.data_len = len(self.x_data)
self.xmax, self.xmin = max(x_data), min(x_data)
self.nk = nk
if w is None:
w_ = np.ones(self.data_len)
else:
w_ = np.array(w)
## sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
# sscale = sum( (y_data[i])**2 for i in range(self.data_len))
if sn is not None:
spl_smax = si.LSQUnivariateSpline(
x_data, y_data, [], k=k, w=w, bbox=bbox)
s_data = [spl_smax(d) for d in x_data]
smax = sum((w_[i]*(y_data[i] - s_data[i]))**2
for i in range(self.data_len))
s = sn*smax
else:
s = None
if method == "interp":
# self.splinefunc = si.InterpolatedUnivariateSpline(x_data, y_data)
self.splinefunc = si.UnivariateSpline(
x_data, y_data, k=k , s=0.0, w=w, bbox=bbox)
elif method == "smooth":
self.splinefunc = si.UnivariateSpline(
x_data, y_data, k=k , s=s, w=w, bbox=bbox)
elif method == "lsq":
if t is None:
knots = self._uniform_knotvector(self.nk)
elif len(t) > 0:
knots = t
else:
                raise ValueError(_error_msg[1])
self.splinefunc = si.LSQUnivariateSpline(
x_data, y_data, knots, k=k, w=w, bbox=bbox)
else:
            raise ValueError(_error_msg[2])
knots = self.splinefunc.get_knots()
self.knots = self._form_knotvector(knots)
self.knots_norm = self._normalize_knotvector( d=self.data_len)
# self.knots_norm = self._normalize_knotvector(self.knots) #newfix
self.coeffs_x = self._get_controlpoints(self.knots)
self.coeffs_y = self.splinefunc.get_coeffs()
self.coeffs_t = self._get_controlpoints(self.knots_norm)
self._build_splines(self.coeffs_x, self.coeffs_y)
self._get_ppolyrep()
@classmethod
def from_tcck(cls, tcck):
"""Construct a parametric spline object from given tcck"""
self = cls.__new__(cls)
t, cx, cy, k = tcck
self.k = k
self.knots = t
self.knots_norm = self._normalize_knotvector()
self.coeffs_x = cx
self.coeffs_y = cy
self._build_splines(self.coeffs_x, self.coeffs_y)
self._get_ppolyrep()
return self
def __call__(self, tpoints):
x_points = self.spline_x(tpoints)
y_points = self.spline_y(tpoints)
return x_points, y_points
def eval(self, x):
if hasattr(x, '__iter__'):
t = np.array([self.spline_x.ppoly.evalinv(xi) for xi in x])
return self.spline_y.ppoly(t)
else:
t = self.spline_x.ppoly.evalinv(x)
return self.spline_y.ppoly(t)
def get_polypoints(self, n):
xpoints = self.spline_x.ppoly.eval_oninterval(n)
ypoints = self.spline_y.ppoly.eval_oninterval(n)
return xpoints, ypoints
def _get_ppolyrep(self):
self.spline_x.ppoly = PPolyInvertible.from_splinefunc(self.spline_x)
self.spline_y.ppoly = PPolyInvertible.from_splinefunc(self.spline_y)
def polyrep(self, tpoints):
return self.spline_x.ppoly(tpoints), self.spline_y.ppoly(tpoints)
def _build_splines(self, coeffs_x, coeffs_y):
tck_x = self.knots_norm, coeffs_x, self.k
tck_y = self.knots_norm, coeffs_y, self.k
self.spline_x = si.UnivariateSpline._from_tck(tck_x)
self.spline_y = si.UnivariateSpline._from_tck(tck_y)
self.spline_x.tck = tck_x
self.spline_y.tck = tck_y
def _form_knotvector(self, knots):
knots_full = np.concatenate(
([knots[0]]*self.k, knots, [knots[-1]]*self.k ))
return knots_full
def _normalize_knotvector(self, knots=None, d=1.0):
if knots is None: knots = self.knots
num_knots = len(knots)
ka = (knots[-1] - knots[0]) / d
knots_norm = np.empty(num_knots)
for i in range(num_knots):
knots_norm[i] = d - ((knots[-1] - knots[i])) / ka
return knots_norm
def _get_controlpoints(self, knots):
n = len(knots) - 1 - self.k
cpoints = np.empty(n)
for i in range(n):
tsum = 0
for j in range(1, self.k + 1):
tsum += knots[i + j]
cpoints[i] = tsum/float(self.k)
return cpoints
def _uniform_knotvector(self, nk):
if nk == 0:
return []
elif nk == 1:
return [(self.xmax - self.xmin) / 2.0 + self.xmin]
else:
knot_offset = float(self.xmax - self.xmin) / nk
# ks = self.xmin + knotdist
# ke = self.xmax - knotdist
knots = np.linspace(self.xmin, self.xmax, nk+2)
knots = knots[1:-1]
# knots = np.linspace(knot_offset, self.xmax-knot_offset, nk-2)
return knots
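

# Minimal usage sketch (illustrative only): the synthetic samples below are
# assumptions for demonstration and are not part of the library above.
if __name__ == '__main__':
    xs = np.linspace(0.0, 1.0, 10)
    ys = xs**2
    # Interpolating parametric fit; eval() inverts x(t) numerically and then
    # evaluates y(t), so the result should be close to 0.5**2 = 0.25.
    spl = ParametricUnivariateSpline(xs, ys, method="interp")
    print(spl.eval(0.5))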
|
the-stack_0_9823 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""drop_user_and_chart
Revision ID: cf5dc11e79ad
Revises: 41f5f12752f8
Create Date: 2019-01-24 15:30:35.834740
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'cf5dc11e79ad'
down_revision = '41f5f12752f8'
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
# We previously had a KnownEvent's table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
    # were happening to use the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
    if 'known_event' in inspector.get_table_names() and conn.dialect.name != 'sqlite':
op.drop_constraint('known_event_user_id_fkey', 'known_event')
op.drop_table("chart")
op.drop_table("users")
def downgrade(): # noqa: D103
conn = op.get_bind()
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(255)),
sa.Column('superuser', sa.Boolean(), default=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
if conn.dialect.name == 'mysql':
conn.execute("SET time_zone = '+00:00'")
op.alter_column(table_name='chart', column_name='last_modified', type_=mysql.TIMESTAMP(fsp=6))
else:
if conn.dialect.name in ('sqlite', 'mssql'):
return
if conn.dialect.name == 'postgresql':
conn.execute("set timezone=UTC")
op.alter_column(table_name='chart', column_name='last_modified', type_=sa.TIMESTAMP(timezone=True))
|
the-stack_0_9824 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gaussian """
import unittest
from test import QiskitMachineLearningTestCase
from test.datasets import get_deprecated_msg_ref
import warnings
import numpy as np
from qiskit_machine_learning.datasets import gaussian
class TestGaussian(QiskitMachineLearningTestCase):
"""Gaussian tests."""
def test_gaussian(self):
"""Gaussian test."""
with warnings.catch_warnings(record=True) as c_m:
warnings.simplefilter("always")
training_features, training_labels, test_features, test_labels = gaussian(
training_size=20, test_size=10, n=2, plot_data=False
)
with self.subTest("Test training_features"):
np.testing.assert_array_equal(training_features.shape, (40, 2))
with self.subTest("Test training_labels1"):
np.testing.assert_array_equal(training_labels.shape, (40, 2))
with self.subTest("Test training_labels2"):
np.testing.assert_array_equal(np.sum(training_labels, axis=0), np.array([20, 20]))
with self.subTest("Test training_labels3"):
np.testing.assert_array_equal(np.sum(training_labels, axis=1), np.ones(40))
with self.subTest("Test features.shape1"):
np.testing.assert_array_equal(test_features.shape, (20, 2))
with self.subTest("Test features.shape2"):
np.testing.assert_array_equal(test_features.shape, (20, 2))
with self.subTest("Test test_labels1"):
np.testing.assert_array_equal(np.sum(test_labels, axis=0), np.array([10, 10]))
with self.subTest("Test test_labels2"):
np.testing.assert_array_equal(np.sum(test_labels, axis=1), np.ones(20))
with self.subTest("Test deprecation msg"):
msg = str(c_m[0].message)
self.assertEqual(msg, get_deprecated_msg_ref("gaussian"))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_9827 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# this allows to use the readthedocs theme also locally
import sphinx_rtd_theme
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'PyAero'
copyright = '2021, Andreas Ennemoser'
author = 'Andreas Ennemoser'
# The short X.Y version
version = '2.0'
# The full version, including alpha/beta/rc tags
release = 'v2.0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
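#
# For sphinx_rtd_theme, an illustrative (entirely optional) configuration is:
#
# html_theme_options = {
#     'collapse_navigation': False,
#     'sticky_navigation': True,
#     'navigation_depth': 4,
# }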
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyAerodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyAero.tex', 'PyAero Documentation',
'Andreas Ennemoser', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyaero', 'PyAero Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyAero', 'PyAero Documentation',
author, 'PyAero', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
the-stack_0_9828 | import math
import random
# random
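# Note: randrange(x, y) returns an integer from x up to, but not including, y.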
x = 10
y = 50
print(random.randrange(x, y))
# math
num1 = 234.01
num2 = 6
num3 = -27.01
print("The smallest integer greater than or equal to num1,",
num1, ":", math.ceil(num1))
print("The largest integer smaller than or equal to num1,",
num1, ":", math.floor(num1))
print("The factorial of num2,", num2, ":", math.factorial(num2))
print("The absolute value of num3", num3, ":", math.fabs(num3))
|
the-stack_0_9829 | """Tests for the MDWeb Base Objects."""
from pyfakefs import fake_filesystem_unittest, fake_filesystem
import unittest
from mdweb.BaseObjects import MetaInfParser
from mdweb.Exceptions import PageMetaInfFieldException
from mdweb.Navigation import Navigation
from mdweb.Page import Page, load_page
class TestNavigationBaseItem(fake_filesystem_unittest.TestCase):
"""MDSite Navigation Base tests."""
def setUp(self):
"""Create fake filesystem."""
self.setUpPyfakefs()
self.fake_os = fake_filesystem.FakeOsModule(self.fs)
def test_navigation_type(self):
"""A directory should have navigation type 'Navigation'."""
self.fs.create_file('/my/content/index.md')
nav = Navigation('/my/content')
self.assertEqual(nav.nav_type, "Navigation")
def test_page_type(self):
"""A file in a directory should have navigation type 'Page'."""
file_string = u""
self.fs.create_file('/my/content/index.md',
contents=file_string)
page = Page(*load_page('/my/content', '/my/content/index.md'))
self.assertEqual(page.nav_type, "Page")
class TestMetaInfParser(unittest.TestCase):
"""Index object tests."""
class MockMetaInf(MetaInfParser): # pylint: disable=R0903
"""MDWeb Navigation Meta Information."""
FIELD_TYPES = {
'nav_name': ('unicode', None),
'order': ('int', 0),
}
def test_blank_value(self):
"""A blank value in a meta-inf definition should raise exception."""
self.assertRaises(PageMetaInfFieldException,
self.MockMetaInf,
'''Nav Name: Documentation
Order: ''')
|
the-stack_0_9832 | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
from tensorboard.plugins import projector
from text_fast import TextFAST
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
OPTION = dh._option(pattern=0)
logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
def train_fasttext():
"""Training FASTTEXT model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load sentences, labels, and training parameters
logger.info("Loading data...")
logger.info("Data processing...")
train_data = dh.load_data_and_labels(args, args.train_file, word2idx)
val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)
# Build a graph and fasttext object
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
fasttext = TextFAST(
sequence_length=args.pad_seq_len,
vocab_size=len(word2idx),
embedding_type=args.embedding_type,
embedding_size=args.embedding_dim,
num_classes=args.num_classes,
l2_reg_lambda=args.l2_lambda,
pretrained_embedding=embedding_matrix)
# Define training procedure
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
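            # The learning rate below decays by decay_rate every decay_steps
            # steps (staircase), and gradients are clipped by global norm
            # before being applied.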
learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,
global_step=fasttext.global_step,
decay_steps=args.decay_steps,
decay_rate=args.decay_rate,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, vars = zip(*optimizer.compute_gradients(fasttext.loss))
grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)
train_op = optimizer.apply_gradients(zip(grads, vars), global_step=fasttext.global_step,
name="train_op")
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in zip(grads, vars):
if g is not None:
grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = dh.get_out_dir(OPTION, logger)
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints"))
# Summaries for loss
loss_summary = tf.summary.scalar("loss", fasttext.loss)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Validation summaries
validation_summary_op = tf.summary.merge([loss_summary])
validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)
best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)
if OPTION == 'R':
# Load fasttext model
logger.info("Loading model...")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
logger.info(checkpoint_file)
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
if OPTION == 'T':
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Embedding visualization config
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = "embedding"
embedding_conf.metadata_path = args.metadata_file
projector.visualize_embeddings(train_summary_writer, config)
projector.visualize_embeddings(validation_summary_writer, config)
# Save the embedding visualization
saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt"))
current_step = sess.run(fasttext.global_step)
def train_step(batch_data):
"""A single training step."""
x_f, x_b, y_onehot = zip(*batch_data)
feed_dict = {
fasttext.input_x_front: x_f,
fasttext.input_x_behind: x_b,
fasttext.input_y: y_onehot,
fasttext.dropout_keep_prob: args.dropout_rate,
fasttext.is_training: True
}
_, step, summaries, loss = sess.run(
[train_op, fasttext.global_step, train_summary_op, fasttext.loss], feed_dict)
logger.info("step {0}: loss {1:g}".format(step, loss))
train_summary_writer.add_summary(summaries, step)
def validation_step(val_loader, writer=None):
"""Evaluates model on a validation set."""
batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)
eval_counter, eval_loss = 0, 0.0
true_labels = []
predicted_scores = []
predicted_labels = []
for batch_validation in batches_validation:
x_f, x_b, y_onehot = zip(*batch_validation)
feed_dict = {
fasttext.input_x_front: x_f,
fasttext.input_x_behind: x_b,
fasttext.input_y: y_onehot,
fasttext.dropout_keep_prob: 1.0,
fasttext.is_training: False
}
step, summaries, predictions, cur_loss = sess.run(
[fasttext.global_step, validation_summary_op,
fasttext.topKPreds, fasttext.loss], feed_dict)
# Prepare for calculating metrics
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in predictions[0]:
predicted_scores.append(j[0])
for k in predictions[1]:
predicted_labels.append(k[0])
eval_loss = eval_loss + cur_loss
eval_counter = eval_counter + 1
if writer:
writer.add_summary(summaries, step)
eval_loss = float(eval_loss / eval_counter)
# Calculate Precision & Recall & F1
eval_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
eval_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
eval_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
eval_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
eval_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
return eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc
# Generate batches
batches_train = dh.batch_iter(list(create_input_data(train_data)), args.batch_size, args.epochs)
num_batches_per_epoch = int((len(train_data['f_pad_seqs']) - 1) / args.batch_size) + 1
# Training loop. For each batch...
for batch_train in batches_train:
train_step(batch_train)
current_step = tf.train.global_step(sess, fasttext.global_step)
if current_step % args.evaluate_steps == 0:
logger.info("\nEvaluation:")
eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc = \
validation_step(val_data, writer=validation_summary_writer)
logger.info("All Validation set: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc))
best_saver.handle(eval_acc, sess, current_step)
if current_step % args.checkpoint_steps == 0:
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logger.info("Saved model checkpoint to {0}\n".format(path))
if current_step % num_batches_per_epoch == 0:
current_epoch = current_step // num_batches_per_epoch
logger.info("Epoch {0} has finished!".format(current_epoch))
logger.info("All Done.")
if __name__ == '__main__':
train_fasttext()
|
the-stack_0_9833 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import shutil
import time
import warnings
import moxing as mox
import apex
import numpy as np
import torch.npu
from apex import amp
from collections import OrderedDict
import torch
import torch.onnx
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
from models import resnet_0_6_0
CALCULATE_DEVICE = "npu:0"
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', default='', type=str,
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--npu', default=None, type=int,
help='NPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--device', default='npu', type=str, help='npu or gpu')
parser.add_argument('--addr', default='10.136.181.115',
type=str, help='master addr')
parser.add_argument('--amp', default=False, action='store_true',
help='use amp to train the model')
parser.add_argument('--warm_up_epochs', default=0, type=int,
help='warm up')
parser.add_argument('--loss-scale', default=1024., type=float,
help='loss scale using in amp, default -1 means dynamic')
parser.add_argument('--opt-level', default='O2', type=str,
help='loss scale using in amp, default -1 means dynamic')
parser.add_argument('--prof', default=False, action='store_true',
help='use profiling to evaluate the performance of model')
parser.add_argument('--save_path', default='', type=str,
help='path to save models')
parser.add_argument('--num_classes', default=1000, type=int,
help='path to save models')
# modelarts modification
parser.add_argument('--train_url',
default='',
type=str,
help="setting dir of training output")
parser.add_argument('--data_url',
metavar='DIR',
default='',
help='path to dataset')
parser.add_argument('--model_url',
metavar='DIR',
default='',
help='path to pretrained model')
parser.add_argument('--onnx', default=True, action='store_true',
help="convert pth model to onnx")
cur_step = 0
CACHE_TRAINING_URL = "/cache/training/"
CACHE_DATA_URL = "/cache/data_url"
CACHE_MODEL_URL = "/cache/model"
best_acc1 = 0
def main():
args = parser.parse_args()
global CALCULATE_DEVICE
CALCULATE_DEVICE = "npu:{}".format(args.npu)
if 'npu' in CALCULATE_DEVICE:
torch.npu.set_device(CALCULATE_DEVICE)
if args.data_url:
import moxing as mox
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
###### modify npu_p1 1######
args.gpu = None
###### modify npu_p1 1 end ######
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
###### modify 8 ######
if args.device == 'npu':
dist.init_process_group(backend=args.dist_backend, # init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
else:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
###### modify 8 end ######
# create model
if args.pretrained:
print("=> using pre-trained model wide_resnet101_2")
model = resnet_0_6_0.wide_resnet101_2()
print("loading model of yours...")
model_path = "./checkpoint.pth.tar"
if args.model_url:
real_path = CACHE_MODEL_URL
if not os.path.exists(real_path):
os.makedirs(real_path)
mox.file.copy_parallel(args.model_url, real_path)
print("training data finish copy to %s." % real_path)
model_path = os.path.join(CACHE_MODEL_URL, 'checkpoint.pth.tar')
pretrained_dict = torch.load(model_path, map_location="cpu")["state_dict"]
model.load_state_dict({k.replace('module.', ''): v for k, v in pretrained_dict.items()})
if "fc.weight" in pretrained_dict:
pretrained_dict.pop('fc.weight')
pretrained_dict.pop('fc.bias')
for param in model.parameters():
param.requires_grad = False
model.fc = nn.Linear(2048, args.num_classes)
#model.load_state_dict(pretrained_dict, strict=False)
else:
print("=> creating model wide_resnet101_2")
model = resnet_0_6_0.wide_resnet101_2()
# if not torch.cuda.is_available():
# print('using CPU, this will be slow')
# elif args.distributed:
###### modify npu_p1 2######
if args.distributed:
###### modify npu_p1 2 end ######
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
# model = torch.nn.DataParallel(model).cuda()
###### modify npu_p1 3######
model = model.to(CALCULATE_DEVICE)
###### modify npu_p1 3 end ######
# define loss function (criterion) and optimizer
# criterion = nn.CrossEntropyLoss().cuda(args.gpu)
############## npu modify 4 begin #############
    # Move the loss function to the NPU for computation.
criterion = nn.CrossEntropyLoss().to(CALCULATE_DEVICE)
############## npu modify 4 end #############
optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), args.lr,
momentum=args.momentum,
nesterov=True,
weight_decay=args.weight_decay)
###### modify 1 ######
if args.amp:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale)
###### modify 1 end ######
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
if args.data_url:
real_path = CACHE_DATA_URL
if not os.path.exists(real_path):
os.makedirs(real_path)
mox.file.copy_parallel(args.data_url, real_path)
print("training data finish copy to %s." % real_path)
args.data = real_path
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
###### modify 7 ######
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(
train_sampler is None),
num_workers=args.workers, pin_memory=False, sampler=train_sampler, drop_last=True)
###### modify 7 end #######
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
###### modify 3 ######
if args.prof:
profiling(train_loader, model, criterion, optimizer, args)
return
###### modify 3 end ######
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
if args.train_url:
mox.file.copy_parallel(CACHE_TRAINING_URL, args.train_url)
def proc_node_module(checkpoint, AttrName):
new_state_dict = OrderedDict()
for k, v in checkpoint[AttrName].items():
if(k[0:7] == "module."):
name = k[7:]
else:
name = k[0:]
new_state_dict[name] = v
return new_state_dict
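# Illustrative example (hypothetical key name): proc_node_module turns a
# DataParallel-style key such as "module.layer1.0.conv1.weight" into
# "layer1.0.conv1.weight" so the weights can be loaded into a bare model.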
def convert(model_path, onnx_save, num_class):
checkpoint = torch.load(model_path, map_location='cpu')
checkpoint['state_dict'] = proc_node_module(checkpoint, 'state_dict')
model = resnet_0_6_0.wide_resnet101_2(num_classes=num_class)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
input_names = ["actual_input_1"]
output_names = ["output1"]
dummy_input = torch.randn(1, 3, 224, 224)
if len(onnx_save) > 0:
save_path = os.path.join(onnx_save, "wide_resnet101_2_npu_16.onnx")
else:
save_path = "wide_resnet101_2_npu_16.onnx"
print(save_path)
torch.onnx.export(model, dummy_input, save_path
, input_names=input_names, output_names=output_names
, opset_version=11)
def profiling(data_loader, model, criterion, optimizer, args):
# switch to train mode
model.train()
def update(model, images, target, optimizer):
output = model(images)
loss = criterion(output, target)
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.zero_grad()
optimizer.step()
for step, (images, target) in enumerate(data_loader):
if args.device == 'npu':
# loc = 'npu:{}'.format(args.gpu)
loc = CALCULATE_DEVICE
images = images.to(loc, non_blocking=True).to(torch.float)
target = target.to(torch.int32).to(loc, non_blocking=True)
else:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
if step < 5:
update(model, images, target, optimizer)
else:
if args.device == 'npu':
with torch.autograd.profiler.profile(use_npu=True) as prof:
update(model, images, target, optimizer)
else:
with torch.autograd.profiler.profile(use_cuda=True) as prof:
update(model, images, target, optimizer)
break
prof.export_chrome_trace("output.prof")
def train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
# if torch.cuda.is_available():
# target = target.cuda(args.gpu, non_blocking=True)
############## npu modify 5 begin #############
        # Move the data to the NPU for computation and change the target's dtype.
if 'npu' in CALCULATE_DEVICE:
target = target.to(torch.int32)
images, target = images.to(CALCULATE_DEVICE, non_blocking=True), target.to(CALCULATE_DEVICE, non_blocking=True)
############## npu modify 5 end #############
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
###### modify 2 ######
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
###### modify 2 end ######
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
###### modify 4 ######
if i % args.print_freq == 0:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
progress.display(i)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
# print("[npu id:", args.gpu, "]", "batch_size:", ngpus_per_node * args.batch_size,
# 'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(
# args.batch_size / batch_time.avg))
if batch_time.avg:
print("[npu id:", CALCULATE_DEVICE, "]", "batch_size:", args.world_size * args.batch_size,
'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(
args.batch_size * args.world_size / batch_time.avg))
###### modify 4 end ######
def validate(val_loader, model, criterion, args):
###### modify 5 ######
batch_time = AverageMeter('Time', ':6.3f', start_count_index= 5)
###### modify 5 end ######
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.device == 'npu':
loc = CALCULATE_DEVICE
images = images.to(loc).to(torch.float)
if args.device == 'npu':
loc = CALCULATE_DEVICE
target = target.to(torch.int32).to(loc, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
args = parser.parse_args()
if args.train_url:
os.makedirs(CACHE_TRAINING_URL, 0o755, exist_ok=True)
filename = os.path.join(CACHE_TRAINING_URL, filename)
torch.save(state, filename)
convert(filename, CACHE_TRAINING_URL, args.num_classes)
path_best = os.path.join(CACHE_TRAINING_URL, 'model_best.pth.tar')
if is_best:
shutil.copyfile(filename, path_best)
else:
filename = os.path.join(args.save_path, filename)
torch.save(state, filename)
path_best = os.path.join(args.save_path, 'model_best.pth.tar')
if is_best:
shutil.copyfile(filename, path_best)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', start_count_index=2):
self.name = name
self.fmt = fmt
self.reset()
self.start_count_index = start_count_index
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if self.count == 0:
self.N = n
self.val = val
self.count += n
if self.count > (self.start_count_index * self.N):
self.sum += val * n
self.avg = self.sum / (self.count - self.start_count_index * self.N)
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by cosine method"""
if args.warm_up_epochs > 0 and epoch < args.warm_up_epochs:
lr = args.lr * ((epoch + 1) / (args.warm_up_epochs + 1))
else:
alpha = 0
cosine_decay = 0.5 * (
1 + np.cos(np.pi * (epoch - args.warm_up_epochs) / (args.epochs - args.warm_up_epochs)))
decayed = (1 - alpha) * cosine_decay + alpha
lr = args.lr * decayed
print("=> Epoch[%d] Setting lr: %.4f" % (epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
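# Worked example (the numbers are assumptions, not values from the original
# config): with lr=0.1 and warm_up_epochs=5, epoch 0 uses 0.1 * 1/6 ~= 0.0167
# and the warm-up ramps linearly; after warm-up the rate follows a cosine
# decay from 0.1 down toward 0 at the final epoch.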
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
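# Worked example (illustrative logits): for output [[0.1, 0.7, 0.2]] and
# target [2], accuracy(output, target, topk=(1, 2)) reports 0% top-1 and
# 100% top-2, since the true class only appears among the two highest scores.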
if __name__ == '__main__':
############## npu modify 6 begin #############
main()
|
the-stack_0_9835 | # Copyright 2017 the pycolab Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defeat marauders from somewhere exterior to this planet.
Keys: left, right - move. space - fire. q - quit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import numpy as np
import sys
from pycolab import ascii_art
from pycolab import human_ui
from pycolab import rendering
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
# Not shown in this ASCII art diagram are the Sprites we use for laser blasts,
# which control the characters listed in UPWARD_BOLT_CHARS and
# DOWNWARD_BOLT_CHARS below.
GAME_ART = [' X X X X X X X X ', # Row 0
' X X X X X X X X ',
' X X X X X X X X ',
' X X X X X X X X ',
' X X X X X X X X ',
' ', # Row 5
' ',
' ',
' ',
' ',
' ', # Row 10. If a Marauder
' BBBB BBBB BBBB BBBB ', # makes it to row 10,
' BBBB BBBB BBBB BBBB ', # the game is over.
' BBBB BBBB BBBB BBBB ',
' ',
' P ']
# Characters listed in UPWARD_BOLT_CHARS are used for Sprites that represent
# laser bolts that the player shoots toward Marauders. Add more characters if
# you want to be able to have more than two of these bolts in the "air" at once.
UPWARD_BOLT_CHARS = 'abcd'
# Characters listed in DOWNWARD_BOLT_CHARS are used for Sprites that represent
# laser bolts that Marauders shoot toward the player. Add more characters if you
# want more shooting from the Marauders.
DOWNWARD_BOLT_CHARS = 'yz'
# Shorthand for various points in the program:
_ALL_BOLT_CHARS = UPWARD_BOLT_CHARS + DOWNWARD_BOLT_CHARS
# To make life a bit easier for the player (and avoid the need for frame
# stacking), we use different characters to indicate the directions that the
# bolts go. If you'd like to make this game harder, you might try mapping both
# kinds of bolts to the same character.
LASER_REPAINT_MAPPING = dict(
[(b, '^') for b in UPWARD_BOLT_CHARS] +
[(b, '|') for b in DOWNWARD_BOLT_CHARS])
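# Illustrative result: with the character sets above this mapping resolves to
# {'a': '^', 'b': '^', 'c': '^', 'd': '^', 'y': '|', 'z': '|'}.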
# These colours are only for humans to see in the CursesUi.
COLOURS_FG = {' ': (0, 0, 0), # Space, inky blackness of.
'X': (999, 999, 999), # The Marauders.
'B': (400, 50, 30), # The bunkers.
'P': (0, 999, 0), # The player.
'^': (0, 999, 999), # Bolts from player to aliens.
'|': (0, 999, 999)} # Bolts from aliens to player.
COLOURS_BG = {'^': (0, 0, 0), # Bolts from player to aliens.
'|': (0, 0, 0)} # Bolts from aliens to player.
def make_game():
"""Builds and returns an Extraterrestrial Marauders game."""
return ascii_art.ascii_art_to_game(
GAME_ART, what_lies_beneath=' ',
sprites=dict(
[('P', PlayerSprite)] +
[(c, UpwardLaserBoltSprite) for c in UPWARD_BOLT_CHARS] +
[(c, DownwardLaserBoltSprite) for c in DOWNWARD_BOLT_CHARS]),
drapes=dict(X=MarauderDrape,
B=BunkerDrape),
update_schedule=['P', 'B', 'X'] + list(_ALL_BOLT_CHARS),
nb_action=5)
class BunkerDrape(plab_things.Drape):
"""A `Drape` for the bunkers at the bottom of the screen.
Bunkers are gradually eroded by laser bolts, for which the user loses one
point. Other than that, they don't really do much. If a laser bolt hits a
bunker, this Drape leaves a note about it in the Plot---the bolt's Sprite
checks this and removes itself from the board if it's present.
"""
def update(self, actions, board, layers, backdrop, things, the_plot):
# Where are the laser bolts? Bolts from players or marauders do damage.
bolts = np.logical_or.reduce([layers[c] for c in _ALL_BOLT_CHARS], axis=0)
hits = bolts & self.curtain # Any hits to a bunker?
np.logical_xor(self.curtain, hits, self.curtain) # If so, erode the bunker...
the_plot.add_reward(-np.sum(hits)) # ...and impose a penalty.
# Save the identities of bunker-striking bolts in the Plot.
the_plot['bunker_hitters'] = [chr(c) for c in board[hits]]
class MarauderDrape(plab_things.Drape):
"""A `Drape` for the marauders descending downward toward the player.
The Marauders all move in lockstep, which makes them an ideal application of
a Drape. Bits of the Drape get eroded by laser bolts from the player; each
hit earns ten points. If the Drape goes completely empty, or if any Marauder
makes it down to row 10, the game terminates.
As with `BunkerDrape`, if a laser bolt hits a Marauder, this Drape leaves a
note about it in the Plot; the bolt's Sprite checks this and removes itself
from the board if present.
"""
def __init__(self, curtain, character):
# The constructor just sets the Marauder's initial horizontal direction.
super(MarauderDrape, self).__init__(curtain, character)
self._dx = -1
def update(self, actions, board, layers, backdrop, things, the_plot):
# Where are the laser bolts? Only bolts from the player kill a Marauder.
bolts = np.logical_or.reduce([layers[c] for c in UPWARD_BOLT_CHARS], axis=0)
hits = bolts & self.curtain # Any hits to Marauders?
np.logical_xor(self.curtain, hits, self.curtain) # If so, zap the marauder...
the_plot.add_reward(np.sum(hits)*10) # ...and supply a reward.
# Save the identities of marauder-striking bolts in the Plot.
the_plot['marauder_hitters'] = [chr(c) for c in board[hits]]
# If no Marauders are left, or if any are sitting on row 10, end the game.
if (not self.curtain.any()) or self.curtain[10, :].any():
return the_plot.terminate_episode() # i.e. return None.
# We move faster if there are fewer Marauders. The odd divisor causes speed
# jumps to align on the high sides of multiples of 8; so, speed increases as
# the number of Marauders decreases to 32 (or 24 etc.), not 31 (or 23 etc.).
if the_plot.frame % max(1, np.sum(self.curtain)//8.0000001): return
# If any Marauder reaches either side of the screen, reverse horizontal
# motion and advance vertically one row.
if np.any(self.curtain[:, 0] | self.curtain[:, -1]):
self._dx = -self._dx
self.curtain[:] = np.roll(self.curtain, shift=1, axis=0)
self.curtain[:] = np.roll(self.curtain, shift=self._dx, axis=1)
class PlayerSprite(prefab_sprites.MazeWalker):
"""A `Sprite` for our player.
This `Sprite` simply ties actions to going left and right. In interactive
settings, the user can also quit.
"""
def __init__(self, corner, position, character):
"""Simply indicates to the superclass that we can't walk off the board."""
super(PlayerSprite, self).__init__(
corner, position, character, impassable='', confined_to_board=True)
def update(self, actions, board, layers, backdrop, things, the_plot):
del layers, backdrop, things # Unused.
if actions == 0: # go leftward?
self._west(board, the_plot)
elif actions == 1: # go rightward?
self._east(board, the_plot)
elif actions == 4: # quit?
the_plot.terminate_episode()
class UpwardLaserBoltSprite(prefab_sprites.MazeWalker):
"""Laser bolts shot from the player toward Marauders."""
def __init__(self, corner, position, character):
"""Starts the Sprite in a hidden position off of the board."""
super(UpwardLaserBoltSprite, self).__init__(
corner, position, character, impassable='')
self._teleport((-1, -1))
def update(self, actions, board, layers, backdrop, things, the_plot):
if self.visible:
self._fly(board, layers, things, the_plot)
elif actions == 2:
self._fire(layers, things, the_plot)
def _fly(self, board, layers, things, the_plot):
"""Handles the behaviour of visible bolts flying toward Marauders."""
# Disappear if we've hit a Marauder or a bunker.
if (self.character in the_plot['bunker_hitters'] or
self.character in the_plot['marauder_hitters']):
return self._teleport((-1, -1))
# Otherwise, northward!
self._north(board, the_plot)
def _fire(self, layers, things, the_plot):
"""Launches a new bolt from the player."""
# We don't fire if the player fired another bolt just now.
if the_plot.get('last_player_shot') == the_plot.frame: return
the_plot['last_player_shot'] = the_plot.frame
# We start just above the player.
row, col = things['P'].position
self._teleport((row-1, col))
class DownwardLaserBoltSprite(prefab_sprites.MazeWalker):
"""Laser bolts shot from Marauders toward the player."""
def __init__(self, corner, position, character):
"""Starts the Sprite in a hidden position off of the board."""
super(DownwardLaserBoltSprite, self).__init__(
corner, position, character, impassable='')
self._teleport((-1, -1))
def update(self, actions, board, layers, backdrop, things, the_plot):
if self.visible:
self._fly(board, layers, things, the_plot)
else:
self._fire(layers, the_plot)
def _fly(self, board, layers, things, the_plot):
"""Handles the behaviour of visible bolts flying toward the player."""
# Disappear if we've hit a bunker.
if self.character in the_plot['bunker_hitters']:
return self._teleport((-1, -1))
# End the game if we've hit the player.
if self.position == things['P'].position: the_plot.terminate_episode()
self._south(board, the_plot)
def _fire(self, layers, the_plot):
"""Launches a new bolt from a random Marauder."""
# We don't fire if another Marauder fired a bolt just now.
if the_plot.get('last_marauder_shot') == the_plot.frame: return
the_plot['last_marauder_shot'] = the_plot.frame
# Which Marauder should fire the laser bolt?
col = np.random.choice(np.nonzero(layers['X'].sum(axis=0))[0])
row = np.nonzero(layers['X'][:, col])[0][-1] + 1
# Move ourselves just below that Marauder.
self._teleport((row, col))
def main(argv=()):
del argv # Unused.
# Build an Extraterrestrial Marauders game.
game = make_game()
# Build an ObservationCharacterRepainter that will make laser bolts of the
# same type all look identical.
repainter = rendering.ObservationCharacterRepainter(LASER_REPAINT_MAPPING)
# Make a CursesUi to play it with.
ui = human_ui.CursesUi(
keys_to_actions={curses.KEY_LEFT: 0, curses.KEY_RIGHT: 1,
' ': 2, # shoot
-1: 3, # no-op
'q': 4}, # quit
repainter=repainter, delay=300,
colour_fg=COLOURS_FG, colour_bg=COLOURS_BG)
# Let the game begin!
ui.play(game)
if __name__ == '__main__':
main(sys.argv)
|
the-stack_0_9837 | """Utility for retrieveing the docstring of a dataclass's attributes
@author: Fabrice Normandin
"""
import inspect
import typing
from argparse import ArgumentTypeError
from dataclasses import dataclass
from typing import *
from logging import getLogger
logger = getLogger(__name__)
@dataclass
class AttributeDocString:
"""Simple dataclass for holding the comments of a given field."""
comment_above: str = ""
comment_inline: str = ""
docstring_below: str = ""
def get_attribute_docstring(
some_dataclass: Type, field_name: str
) -> AttributeDocString:
"""Returns the docstrings of a dataclass field.
NOTE: a docstring can either be:
- An inline comment, starting with <#>
- A Comment on the preceding line, starting with <#>
- A docstring on the following line, starting with either <\"\"\"> or <'''>
Arguments:
some_dataclass {type} -- a dataclass
field_name {str} -- the name of the field.
Returns:
AttributeDocString -- an object holding the three possible comments
"""
try:
source = inspect.getsource(some_dataclass)
except TypeError as e:
logger.debug(f"Couldn't find the attribute docstring: {e}")
return AttributeDocString()
code_lines: List[str] = source.splitlines()
# the first line is the class definition, we skip it.
start_line_index = 1
# starting at the second line, there might be the docstring for the class.
# We want to skip over that until we reach an attribute definition.
while start_line_index < len(code_lines):
if _contains_attribute_definition(code_lines[start_line_index]):
break
start_line_index += 1
lines_with_attribute_defs = [
(index, line)
for index, line in enumerate(code_lines)
if _contains_attribute_definition(line)
]
for i, line in lines_with_attribute_defs:
parts: List[str] = line.split(":", maxsplit=1)
if parts[0].strip() == field_name:
# we found the line with the definition of this field.
comment_above = _get_comment_ending_at_line(code_lines, i - 1)
comment_inline = _get_inline_comment_at_line(code_lines, i)
docstring_below = _get_docstring_starting_at_line(code_lines, i + 1)
complete_docstring = AttributeDocString(
comment_above, comment_inline, docstring_below
)
return complete_docstring
# we didn't find the attribute.
mro = inspect.getmro(some_dataclass)
if len(mro) == 1:
raise RuntimeWarning(
f"Couldn't find the given attribute name {field_name}' within the "
"given class."
)
base_class = mro[1]
try:
return get_attribute_docstring(base_class, field_name)
except OSError as e:
logger.warning(UserWarning(f"Couldn't find the docstring: {e}"))
return AttributeDocString()
def _contains_attribute_definition(line_str: str) -> bool:
"""Returns wether or not a line contains a an dataclass field definition.
Arguments:
line_str {str} -- the line content
Returns:
bool -- True if there is an attribute definition in the line.
"""
parts = line_str.split("#", maxsplit=1)
before_comment = parts[0].strip()
before_first_equal = before_comment.split("=", maxsplit=1)[0]
parts = before_first_equal.split(":")
if len(parts) != 2:
# For now, I don't think it's possible to have a type annotation contain :
return False
attr_name = parts[0]
attr_type = parts[1]
return not attr_name.isspace() and not attr_type.isspace()
def _is_empty(line_str: str) -> bool:
return line_str.strip() == ""
def _is_comment(line_str: str) -> bool:
return line_str.strip().startswith("#")
def _get_comment_at_line(code_lines: List[str], line: int) -> str:
"""Gets the comment at line `line` in `code_lines`.
Arguments:
line {int} -- the index of the line in code_lines
Returns:
str -- the comment at the given line. empty string if not present.
"""
line_str = code_lines[line]
assert not _contains_attribute_definition(line_str)
if "#" not in line_str:
return ""
parts = line_str.split("#", maxsplit=1)
comment = parts[1].strip()
return comment
def _get_inline_comment_at_line(code_lines: List[str], line: int) -> str:
"""Gets the inline comment at line `line`.
Arguments:
line {int} -- the index of the line in code_lines
Returns:
str -- the inline comment at the given line, else an empty string.
"""
assert 0 <= line < len(code_lines)
assert _contains_attribute_definition(code_lines[line])
line_str = code_lines[line]
parts = line_str.split("#", maxsplit=1)
if len(parts) != 2:
return ""
comment = parts[1].strip()
return comment
def _get_comment_ending_at_line(code_lines: List[str], line: int) -> str:
result = ""
start_line = line
end_line = line
# print(f"Get comment ending at line {line}")
# for i, l in enumerate(code_lines):
# print(f"line {i}: {l}")
# move up the code, one line at a time, while we don't hit the start,
# an attribute definition, or the end of a docstring.
while start_line > 0:
line_str = code_lines[start_line]
if _contains_attribute_definition(line_str):
break # previous line is an assignment
if '"""' in line_str or "'''" in line_str:
break # previous line has a docstring
start_line -= 1
start_line += 1
lines = []
for i in range(start_line, end_line + 1):
# print(f"line {i}: {code_lines[i]}")
if _is_empty(code_lines[i]):
continue
assert not _contains_attribute_definition(code_lines[i])
comment = _get_comment_at_line(code_lines, i)
lines.append(comment)
return "\n".join(lines)
def _get_docstring_starting_at_line(code_lines: List[str], line: int) -> str:
first_line = line
i = line
end_line: int
token: Optional[str] = None
triple_single = "'''"
triple_double = '"""'
# print("finding docstring starting from line", line)
# if we are looking further down than the end of the code, there is no
# docstring.
if line >= len(code_lines):
return ""
# the list of lines making up the docstring.
docstring_contents: List[str] = []
    while i < len(code_lines):
line_str = code_lines[i]
# print(f"(docstring) line {line}: {line_str}")
# we haven't identified the starting line yet.
if token is None:
if _is_empty(line_str):
i += 1
continue
elif _contains_attribute_definition(line_str) or _is_comment(line_str):
# we haven't reached the start of a docstring yet (since token
# is None), and we reached a line with an attribute definition,
# or a comment, hence the docstring is empty.
return ""
elif triple_single in line_str and triple_double in line_str:
# This handles something stupid like:
# @dataclass
# class Bob:
# a: int
# """ hello '''
# bob
# ''' bye
# """
triple_single_index = line_str.index(triple_single)
triple_double_index = line_str.index(triple_double)
if triple_single_index < triple_double_index:
token = triple_single
else:
token = triple_double
elif triple_double in line_str:
token = triple_double
elif triple_single in line_str:
token = triple_single
else:
# for i, line in enumerate(code_lines):
# print(f"line {i}: <{line}>")
# print(f"token: <{token}>")
# print(line_str)
logger.debug(
f"Warning: Unable to parse attribute docstring: {line_str}"
)
return ""
# get the string portion of the line (after a token or possibly
# between two tokens).
parts = line_str.split(token, maxsplit=2)
if len(parts) == 3:
# This takes care of cases like:
# @dataclass
# class Bob:
# a: int
# """ hello """
between_tokens = parts[1].strip()
# print("Between tokens:", between_tokens)
docstring_contents.append(between_tokens)
break
elif len(parts) == 2:
after_token = parts[1].strip()
# print("After token:", after_token)
docstring_contents.append(after_token)
else:
# print(f"token is <{token}>")
if token in line_str:
# print(f"Line {line} End of a docstring:", line_str)
before = line_str.split(token, maxsplit=1)[0]
docstring_contents.append(before.strip())
break
else:
# intermediate line without the token.
docstring_contents.append(line_str.strip())
i += 1
# print("Docstring contents:", docstring_contents)
return "\n".join(docstring_contents)
|
the-stack_0_9838 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=38
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.rx(2.708052867394402).on(input_qubit[1])) # number=11
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.Y.on(input_qubit[2])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=35
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=36
c.append(cirq.H.on(input_qubit[0])) # number=37
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.Z.on(input_qubit[1])) # number=20
c.append(cirq.Z.on(input_qubit[3])) # number=31
c.append(cirq.H.on(input_qubit[0])) # number=22
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=23
c.append(cirq.H.on(input_qubit[0])) # number=24
c.append(cirq.Z.on(input_qubit[2])) # number=15
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=29
c.append(cirq.H.on(input_qubit[3])) # number=30
c.append(cirq.Z.on(input_qubit[3])) # number=10
c.append(cirq.H.on(input_qubit[0])) # number=32
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=33
c.append(cirq.H.on(input_qubit[0])) # number=34
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
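# Illustrative example: bitstring([1, 0, 1, 1]) -> '1011'; the measurement
# histogram below folds raw result bits with this helper.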
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2905.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
the-stack_0_9839 | from openpyxl import Workbook
from openpyxl.styles import PatternFill
from openpyxl.formatting.rule import FormulaRule
wb = Workbook()
ws = wb.active
ws.cell(2, 1).value = 'not blank'
ws.cell(4, 1).value = 5.3529
orange_fill = PatternFill('solid', start_color='FFA500', end_color='FFA500')
is_blank_rule = FormulaRule(  # rule defined with an Excel formula
formula=['ISBLANK(INDIRECT(ADDRESS(ROW(), COLUMN())))'],
stopIfTrue=True,
fill=orange_fill
)
ws.conditional_formatting.add('A1:A5', is_blank_rule)
ws.title = 'Classic (formula)'
wb.save('classic_formula.xlsx')
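# Hedged follow-up sketch (not in the original script): a second, independent
# rule using openpyxl's built-in CellIsRule, colouring numeric cells greater
# than 5 (the 5.3529 written above would match); the output filename is an
# assumption.
from openpyxl.formatting.rule import CellIsRule
greater_than_rule = CellIsRule(operator='greaterThan', formula=['5'],
                               fill=orange_fill)
ws.conditional_formatting.add('A1:A5', greater_than_rule)
wb.save('classic_cellis.xlsx')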
|
the-stack_0_9841 | template_open = '{{#ctx.payload.aggregations.result.hits.hits.0._source}}'
template_close = template_open.replace('{{#','{{/')
kibana_url = (
"{{ctx.metadata.kibana_url}}/app/kibana#/discover?"
"_a=(columns:!(_source),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,"
"index:'metricbeat-*',key:query,negate:!f,type:custom,value:''),"
"query:(bool:(must:!((regexp:(kubernetes.pod.name:'{{ctx.metadata.regex}}')),"
"(match:(metricset.name:'state_pod')),"
"(match:(kubernetes.namespace:{{ctx.metadata.namespace}}))))))),"
"index:'metricbeat-*',"
"interval:auto,query:(language:lucene,query:''),"
"regexp:(language:lucene,query:'kubernetes.pod.name:test-nginx-%5B%5E-%5D%20-%5B%5E-%5D%20'),"
"sort:!('@timestamp',desc),time:(from:now%2FM,mode:quick,to:now%2FM))"
"&_g=(refreshInterval:(display:Off,pause:!f,value:0),"
"time:(from:now-15m,mode:quick,to:now))"
)
watch_url = "{{ctx.metadata.kibana_url}}/app/management/insightsAndAlerting/watcher/watches/watch/{{ctx.metadata.name}}/status"
slack_alert_template = "{template_open}*<{kibana_url}|{{{{ctx.metadata.name}}}}>* has `{{{{ctx.payload.aggregations.pods.value}}}}` not ready pod(s) <{watch_url}|[ack]>{{{{#ctx.metadata.docs}}}} <{{{{.}}}}|[docs]>{{{{/ctx.metadata.docs}}}}{template_close}".format(**locals())
email_alert_template = "{template_open}<a href=\"{kibana_url}\">{{{{ctx.metadata.name}}}}</a> has {{{{ctx.payload.aggregations.pods.value}}}} not ready pod(s) <a href=\"{watch_url}\">[ack]</a>{{{{#ctx.metadata.docs}}}} <a href=\"{{{{.}}}}\">[docs]</a>{{{{/ctx.metadata.docs}}}}{template_close}".format(**locals())
k8s_template = {
"metadata": {
"name": "",
"namespace": "",
"regex": "",
"kibana_url": "",
"kibana_dashboard": "",
"docs": "",
"xpack" : {
"type" : "json"
},
},
"trigger": {
"schedule": {
"interval": ""
}
},
"input": {
"search": {
"request": {
"search_type": "query_then_fetch",
"indices": [
"metricbeat-*"
],
"rest_total_hits_as_int": True,
"body": {
"aggs": {
"result": {
"top_hits": {
"size": 1
}
},
"pods": {
"cardinality": {
"field": "kubernetes.pod.name"
}
},
"not_ready": {
"terms": {
"field": "kubernetes.pod.name",
"min_doc_count": 12,
"size": 100
}
}
},
"query": {
"bool": {
"must_not": [],
"must": [],
"filter": [
{
"range": {
"@timestamp": {
"gte": "now-{{ctx.metadata.window}}"
}
}
}
]
}
}
}
}
}
},
"condition": {},
"actions": {
"email_admin": {
"throttle_period_in_millis": 300000,
"email": {
"profile": "standard",
"subject": "{{#ctx.payload.aggregations.result.hits.hits.0._source}}{{ctx.metadata.name}} has {{ctx.payload.aggregations.pods.value}} not ready pod(s){{/ctx.payload.aggregations.result.hits.hits.0._source}}",
"body": {
"html": email_alert_template
}
}
},
"notify-slack": {
"throttle_period_in_millis": 300000,
"slack": {
"message": {
"text": slack_alert_template
}
}
}
}
}
metricbeat_template = {
"metadata": {
"window": "300s",
"subject": "No metricbeat data has been recieved in the last 5 minutes!"
},
"trigger": {
"schedule": {
"interval": "60s"
}
},
"input": {
"search": {
"request": {
"search_type": "query_then_fetch",
"indices": [
"metricbeat-*"
],
"rest_total_hits_as_int": True,
"body": {
"query": {
"bool": {
"must": [
{
"match": {
"metricset.name": "state_pod"
}
}
],
"filter": [
{
"range": {
"@timestamp": {
"gte": "now-{{ctx.metadata.window}}"
}
}
}
]
}
}
}
}
}
},
"condition": {
"compare": {
"ctx.payload.hits.total": {
"eq": 0
}
}
},
"actions": {
"email_admin": {
"throttle_period_in_millis": 300000,
"email": {
"profile": "standard",
"subject": "{{ctx.metadata.subject}}",
"body": {
"text": "{{ctx.metadata.message}}"
}
}
},
"notify-slack": {
"throttle_period_in_millis": 300000,
"slack": {
"message": {
"text": "{{ctx.metadata.message}}"
}
}
}
}
}
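# Hedged usage sketch (not part of the original module): these dicts mirror the
# Elasticsearch Watcher API payload, so after filling in the metadata they
# could be registered with the official Python client roughly like this
# (the connection URL and watch id are assumptions):
#
#   from elasticsearch import Elasticsearch
#   es = Elasticsearch("http://localhost:9200")
#   es.watcher.put_watch(id="metricbeat-data-missing", body=metricbeat_template)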
|
the-stack_0_9842 | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?pandalive\.co\.kr/"
))
class Pandalive(Plugin):
_room_id_re = re.compile(r"roomid\s*=\s*String\.fromCharCode\((.*)\)")
def _get_streams(self):
media_code = self.session.http.get(self.url, schema=validate.Schema(
validate.parse_html(),
validate.xml_xpath_string(".//script[contains(text(), 'roomid')]/text()"),
validate.any(None, validate.all(
validate.transform(self._room_id_re.search),
validate.any(None, validate.all(
validate.get(1),
validate.transform(lambda s: "".join(map(lambda c: chr(int(c)), s.split(",")))),
)),
)),
))
if not media_code:
return
log.debug("Media code: {0}".format(media_code))
json = self.session.http.post(
"https://api.pandalive.co.kr/v1/live/play",
data={"action": "watch", "mediaCode": media_code},
schema=validate.Schema(
validate.parse_json(), {
validate.optional("media"): {
"title": validate.text,
"userId": validate.text,
"userNick": validate.text,
"isPw": bool,
"isLive": bool,
"liveType": validate.text,
},
validate.optional("PlayList"): {
"hls2": [{
"url": validate.url(),
}],
},
"result": bool,
"message": validate.text,
},
)
)
if not json["result"]:
log.error(json["message"])
return
if not json["media"]["isLive"]:
log.error("The broadcast has ended")
return
if json["media"]["isPw"]:
log.error("The broadcast is password protected")
return
log.info("Broadcast type: {0}".format(json['media']['liveType']))
self.author = "{0} ({1})".format(json['media']['userNick'], json['media']['userId'])
self.title = "{0}".format(json['media']['title'])
return HLSStream.parse_variant_playlist(self.session, json["PlayList"]["hls2"][0]["url"])
__plugin__ = Pandalive
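# Usage note (illustrative, not part of the plugin): once this file is on
# Streamlink's plugin path, the pluginmatcher above routes matching URLs, e.g.
#   streamlink "https://www.pandalive.co.kr/" best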
|
the-stack_0_9843 | # coding=utf-8
"""
Ingest data from the command-line.
python srtm_prepare.py --output Elevation_1secSRTM_DEMs_v1.0_DEM_Mosaic_dem1sv1_0.yaml \
/g/data/rr1/Elevation/NetCDF/1secSRTM_DEMs_v1.0/DEM/Elevation_1secSRTM_DEMs_v1.0_DEM_Mosaic_dem1sv1_0.nc
"""
from __future__ import absolute_import
import uuid
from dateutil.parser import parse
import yaml
import click
import netCDF4
import os
def prepare_layers(images):
layerdict = {}
for i in images:
image = netCDF4.Dataset(i)
layerpath = str(image.filepath())
for targetlayer in image.variables.values():
if targetlayer.name not in ['crs', 'lat', 'lon']:
layername = str(targetlayer.name)
layerdict[layername] = {'path': layerpath, 'layer': layername, }
return layerdict
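# Illustrative return value (band name and path are assumptions):
#   {'dem1sv1_0': {'path': '/g/data/.../dem_mosaic.nc', 'layer': 'dem1sv1_0'}}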
def prepare_dataset(image, datasets):
image = netCDF4.Dataset(image)
    projection = list(image.variables.values())[0].spatial_ref
    geotransform = str(list(image.variables.values())[0].GeoTransform).split()
    fgeotransform = [float(i) for i in geotransform]
    lon_pixels = int(list(image.dimensions.values())[0].size)
    lat_pixels = int(list(image.dimensions.values())[1].size)
left, right = float(fgeotransform[0]), float(fgeotransform[0] + (lon_pixels * fgeotransform[1]))
bottom, top = float(fgeotransform[3] + (lat_pixels * fgeotransform[5])), float(fgeotransform[3])
return {
'id': str(uuid.uuid4()),
'processing_level': 'modelled',
'product_type': 'DEM',
'creation_dt': parse(image.history[0:24]).isoformat(),
'platform': {'code': 'Space Shuttle Endeavour'},
'instrument': {'name': 'SIR'},
'extent': {
'coord': {
'ul': {'lon': left, 'lat': top},
'ur': {'lon': right, 'lat': top},
'll': {'lon': left, 'lat': bottom},
'lr': {'lon': right, 'lat': bottom},
},
'from_dt': parse(image.history[0:24]).isoformat(),
'to_dt': parse(image.history[0:24]).isoformat(),
'center_dt': parse(image.history[0:24]).isoformat(),
},
'format': {'name': 'NETCDF'},
'grid_spatial': {
'projection': {
'spatial_reference': projection,
'geo_ref_points': {
'ul': {'x': left, 'y': top},
'ur': {'x': right, 'y': top},
'll': {'x': left, 'y': bottom},
'lr': {'x': right, 'y': bottom},
}
}
},
'image': {
'bands': prepare_layers(datasets)
},
'lineage': {'source_datasets': {}},
}
@click.command(help="Prepare single layer netcdf with common grid spec for ingestion to Data Cube.")
@click.argument('datasets', type=click.Path(exists=True, readable=True), nargs=-1)
@click.option('--output', help="Write datasets into this file", type=click.Path(exists=False, writable=True))
def main(datasets, output):
with open(output, 'w') as stream:
yaml.dump((prepare_dataset(datasets[0], datasets)), stream)
if __name__ == "__main__":
main()
|
the-stack_0_9844 | '''
By Benjamin
'''
class Dealer:
'''
INPUT: None
This is the dealer's class.
'''
def __init__(self):
self.cards = [] # Place holder for the dealer's cards
self.hidden_card = 0 # Place holder for the hidden card
# ----------------------------
def hide_first_card(self):
'''
INPUT: None
        Hides the first card given to the dealer.
'''
try:
self.hidden_card = self.cards[0]
except IndexError:
            print('The dealer does not have any cards currently.')
            return
        self.cards[0] = 'X'
# --------------------------------
def reveal_hidden_card(self):
'''
INPUT: None
        Reveals the dealer's hidden card if one exists.
'''
# Checking if a card has been hidden previously
        if self.hidden_card != 0:
self.cards[0] = self.hidden_card
else:
print('The dealer has no hidden card currently.')
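
# Minimal usage sketch: the card values below are arbitrary placeholders,
# since dealing cards happens elsewhere in the game.
if __name__ == '__main__':
    dealer = Dealer()
    dealer.cards = [10, 7]       # pretend two cards were dealt
    dealer.hide_first_card()     # stores 10 and shows ['X', 7]
    print(dealer.cards)
    dealer.reveal_hidden_card()  # restores [10, 7]
    print(dealer.cards)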
|
the-stack_0_9846 | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara.tensor as at
import numpy as np
from aesara.tensor.random.basic import (
RandomVariable,
bernoulli,
betabinom,
binomial,
categorical,
geometric,
hypergeometric,
nbinom,
poisson,
)
from scipy import stats
import pymc as pm
from pymc.aesaraf import floatX, intX, take_along_axis
from pymc.distributions.dist_math import (
betaln,
binomln,
check_parameters,
factln,
log_diff_normal_cdf,
logpow,
normal_lccdf,
normal_lcdf,
)
from pymc.distributions.distribution import Discrete
from pymc.distributions.logprob import logcdf, logp
from pymc.distributions.shape_utils import rv_size_is_none
from pymc.math import sigmoid
__all__ = [
"Binomial",
"BetaBinomial",
"Bernoulli",
"DiscreteWeibull",
"Poisson",
"NegativeBinomial",
"Constant",
"ZeroInflatedPoisson",
"ZeroInflatedBinomial",
"ZeroInflatedNegativeBinomial",
"DiscreteUniform",
"Geometric",
"HyperGeometric",
"Categorical",
"OrderedLogistic",
"OrderedProbit",
]
class Binomial(Discrete):
R"""
Binomial log-likelihood.
The discrete probability distribution of the number of successes
in a sequence of n independent yes/no experiments, each of which
yields success with probability p.
The pmf of this distribution is
.. math:: f(x \mid n, p) = \binom{n}{x} p^x (1-p)^{n-x}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 22)
ns = [10, 17]
ps = [0.5, 0.7]
for n, p in zip(ns, ps):
pmf = st.binom.pmf(x, n, p)
plt.plot(x, pmf, '-o', label='n = {}, p = {}'.format(n, p))
plt.xlabel('x', fontsize=14)
plt.ylabel('f(x)', fontsize=14)
plt.legend(loc=1)
plt.show()
======== ==========================================
Support :math:`x \in \{0, 1, \ldots, n\}`
Mean :math:`n p`
Variance :math:`n p (1 - p)`
======== ==========================================
Parameters
----------
n: int
Number of Bernoulli trials (n >= 0).
p: float
Probability of success in each trial (0 < p < 1).
"""
rv_op = binomial
@classmethod
def dist(cls, n, p, *args, **kwargs):
n = at.as_tensor_variable(intX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([n, p], **kwargs)
def get_moment(rv, size, n, p):
mean = at.round(n * p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, n, p):
r"""
Calculate log-probability of Binomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, n)),
-np.inf,
binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),
)
return check_parameters(res, 0 < n, 0 <= p, p <= 1, msg="n > 0, 0 <= p <= 1")
def logcdf(value, n, p):
"""
Compute the log of the cumulative distribution function for Binomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
value = at.floor(value)
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, n),
at.log(at.betainc(n - value, value + 1, 1 - p)),
0,
),
)
return check_parameters(
res,
0 < n,
0 <= p,
p <= 1,
msg="n > 0, 0 <= p <= 1",
)
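# Usage sketch (hypothetical model; the observed counts are made up):
#
#     with pm.Model():
#         p = pm.Beta("p", alpha=1, beta=1)
#         k = pm.Binomial("k", n=10, p=p, observed=[3, 5, 4])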
class BetaBinomial(Discrete):
R"""
Beta-binomial log-likelihood.
Equivalent to binomial random variable with success probability
drawn from a beta distribution.
The pmf of this distribution is
.. math::
f(x \mid \alpha, \beta, n) =
\binom{n}{x}
\frac{B(x + \alpha, n - x + \beta)}{B(\alpha, \beta)}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def BetaBinom(a, b, n, x):
pmf = special.binom(n, x) * (special.beta(x+a, n-x+b) / special.beta(a, b))
return pmf
x = np.arange(0, 11)
alphas = [0.5, 1, 2.3]
betas = [0.5, 1, 2]
n = 10
for a, b in zip(alphas, betas):
pmf = BetaBinom(a, b, n, x)
plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\beta$ = {}, n = {}'.format(a, b, n))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=9)
plt.show()
======== =================================================================
Support :math:`x \in \{0, 1, \ldots, n\}`
Mean :math:`n \dfrac{\alpha}{\alpha + \beta}`
Variance :math:`n \dfrac{\alpha \beta}{(\alpha+\beta)^2 (\alpha+\beta+1)}`
======== =================================================================
Parameters
----------
n: int
Number of Bernoulli trials (n >= 0).
alpha: float
alpha > 0.
beta: float
beta > 0.
"""
rv_op = betabinom
@classmethod
def dist(cls, alpha, beta, n, *args, **kwargs):
alpha = at.as_tensor_variable(floatX(alpha))
beta = at.as_tensor_variable(floatX(beta))
n = at.as_tensor_variable(intX(n))
return super().dist([n, alpha, beta], **kwargs)
def get_moment(rv, size, n, alpha, beta):
mean = at.round((n * alpha) / (alpha + beta))
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, n, alpha, beta):
r"""
Calculate log-probability of BetaBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, n)),
-np.inf,
binomln(n, value) + betaln(value + alpha, n - value + beta) - betaln(alpha, beta),
)
return check_parameters(res, n >= 0, alpha > 0, beta > 0, msg="n >= 0, alpha > 0, beta > 0")
def logcdf(value, n, alpha, beta):
"""
Compute the log of the cumulative distribution function for BetaBinomial distribution
at the specified value.
Parameters
----------
value: numeric
Value for which log CDF is calculated.
Returns
-------
TensorVariable
"""
# logcdf can only handle scalar values at the moment
if np.ndim(value):
raise TypeError(
f"BetaBinomial.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
)
safe_lower = at.switch(at.lt(value, 0), value, 0)
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, n),
at.logsumexp(
logp(
BetaBinomial.dist(alpha=alpha, beta=beta, n=n),
at.arange(safe_lower, value + 1),
),
keepdims=False,
),
0,
),
)
return check_parameters(res, 0 <= n, 0 < alpha, 0 < beta, msg="n >= 0, alpha > 0, beta > 0")
class Bernoulli(Discrete):
R"""Bernoulli log-likelihood
The Bernoulli distribution describes the probability of successes
(x=1) and failures (x=0).
The pmf of this distribution is
.. math:: f(x \mid p) = p^{x} (1-p)^{1-x}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = [0, 1]
for p in [0, 0.5, 0.8]:
pmf = st.bernoulli.pmf(x, p)
plt.plot(x, pmf, '-o', label='p = {}'.format(p))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=9)
plt.show()
======== ======================
Support :math:`x \in \{0, 1\}`
Mean :math:`p`
Variance :math:`p (1 - p)`
======== ======================
The bernoulli distribution can be parametrized either in terms of p or logit_p.
The link between the parametrizations is given by
.. math:: logit(p) = ln(\frac{p}{1-p})
Parameters
----------
p: float
Probability of success (0 < p < 1).
logit_p: float
Alternative log odds for the probability of success.
"""
rv_op = bernoulli
@classmethod
def dist(cls, p=None, logit_p=None, *args, **kwargs):
if p is not None and logit_p is not None:
raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")
elif p is None and logit_p is None:
raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")
if logit_p is not None:
p = at.sigmoid(logit_p)
p = at.as_tensor_variable(floatX(p))
return super().dist([p], **kwargs)
def get_moment(rv, size, p):
if not rv_size_is_none(size):
p = at.full(size, p)
return at.switch(p < 0.5, 0, 1)
def logp(value, p):
r"""
Calculate log-probability of Bernoulli distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, 1)),
-np.inf,
at.switch(value, at.log(p), at.log1p(-p)),
)
return check_parameters(res, p >= 0, p <= 1, msg="0 <= p <= 1")
def logcdf(value, p):
"""
Compute the log of the cumulative distribution function for Bernoulli distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, 1),
at.log1p(-p),
0,
),
)
return check_parameters(res, 0 <= p, p <= 1, msg="0 <= p <= 1")
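# Worked example of the p <-> logit_p link documented above: with p = 0.8,
# logit(p) = ln(0.8 / 0.2) = ln(4) ~= 1.386, so Bernoulli.dist(p=0.8) and
# Bernoulli.dist(logit_p=np.log(4)) describe the same distribution.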
class DiscreteWeibullRV(RandomVariable):
name = "discrete_weibull"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "int64"
_print_name = ("dWeibull", "\\operatorname{dWeibull}")
@classmethod
def rng_fn(cls, rng, q, beta, size):
p = rng.uniform(size=size)
return np.ceil(np.power(np.log(1 - p) / np.log(q), 1.0 / beta)) - 1
discrete_weibull = DiscreteWeibullRV()
class DiscreteWeibull(Discrete):
R"""Discrete Weibull log-likelihood
The discrete Weibull distribution is a flexible model of count data that
can handle both over- and under-dispersion.
The pmf of this distribution is
.. math:: f(x \mid q, \beta) = q^{x^{\beta}} - q^{(x + 1)^{\beta}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def DiscreteWeibull(q, b, x):
return q**(x**b) - q**((x + 1)**b)
x = np.arange(0, 10)
qs = [0.1, 0.9, 0.9]
betas = [0.3, 1.3, 3]
for q, b in zip(qs, betas):
pmf = DiscreteWeibull(q, b, x)
plt.plot(x, pmf, '-o', label=r'q = {}, $\beta$ = {}'.format(q, b))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=1)
plt.show()
======== ======================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\mu = \sum_{x = 1}^{\infty} q^{x^{\beta}}`
Variance :math:`2 \sum_{x = 1}^{\infty} x q^{x^{\beta}} - \mu - \mu^2`
======== ======================
"""
rv_op = discrete_weibull
@classmethod
def dist(cls, q, beta, *args, **kwargs):
q = at.as_tensor_variable(floatX(q))
beta = at.as_tensor_variable(floatX(beta))
return super().dist([q, beta], **kwargs)
def logp(value, q, beta):
r"""
Calculate log-probability of DiscreteWeibull distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log(at.power(q, at.power(value, beta)) - at.power(q, at.power(value + 1, beta))),
)
return check_parameters(res, 0 < q, q < 1, 0 < beta, msg="0 < q < 1, beta > 0")
def logcdf(value, q, beta):
"""
Compute the log of the cumulative distribution function for Discrete Weibull distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log1p(-at.power(q, at.power(value + 1, beta))),
)
return check_parameters(res, 0 < q, q < 1, 0 < beta, msg="0 < q < 1, beta > 0")
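# Sanity check of the pmf above at x = 0: f(0 | q, beta) = q**(0**beta) - q**(1**beta)
# = 1 - q, i.e. q is the probability of observing a value greater than zero.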
class Poisson(Discrete):
R"""
Poisson log-likelihood.
Often used to model the number of events occurring in a fixed period
of time when the times at which events occur are independent.
The pmf of this distribution is
.. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 15)
for m in [0.5, 3, 8]:
pmf = st.poisson.pmf(x, m)
plt.plot(x, pmf, '-o', label='$\mu$ = {}'.format(m))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\mu`
Variance :math:`\mu`
======== ==========================
Parameters
----------
mu: float
Expected number of occurrences during the given interval
(mu >= 0).
Notes
-----
The Poisson distribution can be derived as a limiting case of the
binomial distribution.
"""
rv_op = poisson
@classmethod
def dist(cls, mu, *args, **kwargs):
mu = at.as_tensor_variable(floatX(mu))
return super().dist([mu], *args, **kwargs)
def get_moment(rv, size, mu):
mu = at.floor(mu)
if not rv_size_is_none(size):
mu = at.full(size, mu)
return mu
def logp(value, mu):
r"""
Calculate log-probability of Poisson distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
logpow(mu, value) - factln(value) - mu,
)
log_prob = check_parameters(res, mu >= 0, msg="mu >= 0")
# Return zero when mu and value are both zero
return at.switch(at.eq(mu, 0) * at.eq(value, 0), 0, log_prob)
def logcdf(value, mu):
"""
Compute the log of the cumulative distribution function for Poisson distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
value = at.floor(value)
# Avoid C-assertion when the gammaincc function is called with invalid values (#4340)
safe_mu = at.switch(at.lt(mu, 0), 0, mu)
safe_value = at.switch(at.lt(value, 0), 0, value)
        res = at.switch(
            at.lt(value, 0),
            -np.inf,
            at.log(at.gammaincc(safe_value + 1, safe_mu)),
        )
return check_parameters(res, 0 <= mu, msg="mu >= 0")
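# Worked check of the logp above: for value = 0 the pmf is exp(-mu), so
# logp(0 | mu) = -mu (e.g. mu = 3.0 gives -3.0); the special case mu = 0,
# value = 0 returns 0, i.e. probability one of observing zero events.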
class NegativeBinomial(Discrete):
R"""
Negative binomial log-likelihood.
The negative binomial distribution describes a Poisson random variable
whose rate parameter is gamma distributed.
The pmf of this distribution is
.. math::
f(x \mid \mu, \alpha) =
\binom{x + \alpha - 1}{x}
(\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def NegBinom(a, m, x):
pmf = special.binom(x + a - 1, x) * (a / (m + a))**a * (m / (m + a))**x
return pmf
x = np.arange(0, 22)
alphas = [0.9, 2, 4]
mus = [1, 2, 8]
for a, m in zip(alphas, mus):
pmf = NegBinom(a, m, x)
plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\mu$ = {}'.format(a, m))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\mu`
======== ==========================
The negative binomial distribution can be parametrized either in terms of mu or p,
and either in terms of alpha or n. The link between the parametrizations is given by
.. math::
\mu &= \frac{n(1-p)}{p} \\
\alpha &= n
Parameters
----------
mu: float
        Poisson distribution parameter (mu > 0).
alpha: float
Gamma distribution parameter (alpha > 0).
p: float
Alternative probability of success in each trial (0 < p < 1).
n: float
Alternative number of target success trials (n > 0)
"""
rv_op = nbinom
@classmethod
def dist(cls, mu=None, alpha=None, p=None, n=None, *args, **kwargs):
n, p = cls.get_n_p(mu=mu, alpha=alpha, p=p, n=n)
n = at.as_tensor_variable(floatX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([n, p], *args, **kwargs)
@classmethod
def get_n_p(cls, mu=None, alpha=None, p=None, n=None):
if n is None:
if alpha is not None:
n = alpha
else:
raise ValueError("Incompatible parametrization. Must specify either alpha or n.")
elif alpha is not None:
raise ValueError("Incompatible parametrization. Can't specify both alpha and n.")
if p is None:
if mu is not None:
p = n / (mu + n)
else:
raise ValueError("Incompatible parametrization. Must specify either mu or p.")
elif mu is not None:
raise ValueError("Incompatible parametrization. Can't specify both mu and p.")
return n, p
def get_moment(rv, size, n, p):
mu = at.floor(n * (1 - p) / p)
if not rv_size_is_none(size):
mu = at.full(size, mu)
return mu
def logp(value, n, p):
r"""
Calculate log-probability of NegativeBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
alpha = n
mu = alpha * (1 - p) / p
res = at.switch(
at.lt(value, 0),
-np.inf,
(
binomln(value + alpha - 1, value)
+ logpow(mu / (mu + alpha), value)
+ logpow(alpha / (mu + alpha), alpha)
),
)
negbinom = check_parameters(
res,
mu > 0,
alpha > 0,
msg="mu > 0, alpha > 0",
)
# Return Poisson when alpha gets very large.
return at.switch(at.gt(alpha, 1e10), logp(Poisson.dist(mu=mu), value), negbinom)
def logcdf(value, n, p):
"""
Compute the log of the cumulative distribution function for NegativeBinomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log(at.betainc(n, at.floor(value) + 1, p)),
)
return check_parameters(
res,
0 < n,
0 <= p,
p <= 1,
msg="0 < n, 0 <= p <= 1",
)
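# Worked example of the mu/alpha <-> n/p link documented above: with mu = 4.0
# and alpha = 2.0, get_n_p gives n = alpha = 2.0 and p = n / (mu + n) = 1/3,
# so NegativeBinomial.dist(mu=4.0, alpha=2.0) and
# NegativeBinomial.dist(n=2.0, p=1/3) describe the same distribution.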
class Geometric(Discrete):
R"""
Geometric log-likelihood.
The probability that the first success in a sequence of Bernoulli
trials occurs on the x'th trial.
The pmf of this distribution is
.. math:: f(x \mid p) = p(1-p)^{x-1}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(1, 11)
for p in [0.1, 0.25, 0.75]:
pmf = st.geom.pmf(x, p)
plt.plot(x, pmf, '-o', label='p = {}'.format(p))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== =============================
Support :math:`x \in \mathbb{N}_{>0}`
Mean :math:`\dfrac{1}{p}`
Variance :math:`\dfrac{1 - p}{p^2}`
======== =============================
Parameters
----------
p: float
Probability of success on an individual trial (0 < p <= 1).
"""
rv_op = geometric
@classmethod
def dist(cls, p, *args, **kwargs):
p = at.as_tensor_variable(floatX(p))
return super().dist([p], *args, **kwargs)
def get_moment(rv, size, p):
mean = at.round(1.0 / p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, p):
r"""
Calculate log-probability of Geometric distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 1),
-np.inf,
at.log(p) + logpow(1 - p, value - 1),
)
return check_parameters(
res,
0 <= p,
p <= 1,
msg="0 <= p <= 1",
)
def logcdf(value, p):
"""
Compute the log of the cumulative distribution function for Geometric distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log1mexp(at.log1p(-p) * value),
)
return check_parameters(
res,
0 <= p,
p <= 1,
msg="0 <= p <= 1",
)
class HyperGeometric(Discrete):
R"""
Discrete hypergeometric distribution.
The probability of :math:`x` successes in a sequence of :math:`n` bernoulli
trials taken without replacement from a population of :math:`N` objects,
containing :math:`k` good (or successful or Type I) objects.
The pmf of this distribution is
.. math:: f(x \mid N, n, k) = \frac{\binom{k}{x}\binom{N-k}{n-x}}{\binom{N}{n}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(1, 15)
N = 50
k = 10
for n in [20, 25]:
pmf = st.hypergeom.pmf(x, N, k, n)
plt.plot(x, pmf, '-o', label='n = {}'.format(n))
plt.plot(x, pmf, '-o', label='N = {}'.format(N))
plt.plot(x, pmf, '-o', label='k = {}'.format(k))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== =============================
Support :math:`x \in \left[\max(0, n - N + k), \min(k, n)\right]`
Mean :math:`\dfrac{nk}{N}`
Variance :math:`\dfrac{(N-n)nk(N-k)}{(N-1)N^2}`
======== =============================
Parameters
----------
N : integer
Total size of the population
k : integer
Number of successful individuals in the population
n : integer
Number of samples drawn from the population
"""
rv_op = hypergeometric
@classmethod
def dist(cls, N, k, n, *args, **kwargs):
good = at.as_tensor_variable(intX(k))
bad = at.as_tensor_variable(intX(N - k))
n = at.as_tensor_variable(intX(n))
return super().dist([good, bad, n], *args, **kwargs)
def get_moment(rv, size, good, bad, n):
N, k = good + bad, good
mode = at.floor((n + 1) * (k + 1) / (N + 2))
if not rv_size_is_none(size):
mode = at.full(size, mode)
return mode
def logp(value, good, bad, n):
r"""
Calculate log-probability of HyperGeometric distribution at specified value.
Parameters
----------
value : numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
tot = good + bad
result = (
betaln(good + 1, 1)
+ betaln(bad + 1, 1)
+ betaln(tot - n + 1, n + 1)
- betaln(value + 1, good - value + 1)
- betaln(n - value + 1, bad - n + value + 1)
- betaln(tot + 1, 1)
)
# value in [max(0, n - N + k), min(k, n)]
lower = at.switch(at.gt(n - tot + good, 0), n - tot + good, 0)
upper = at.switch(at.lt(good, n), good, n)
res = at.switch(
at.lt(value, lower),
-np.inf,
at.switch(
at.le(value, upper),
result,
-np.inf,
),
)
return check_parameters(res, lower <= upper, msg="lower <= upper")
def logcdf(value, good, bad, n):
"""
Compute the log of the cumulative distribution function for HyperGeometric distribution
at the specified value.
Parameters
----------
value: numeric
Value for which log CDF is calculated.
Returns
-------
TensorVariable
"""
# logcdf can only handle scalar values at the moment
if np.ndim(value):
raise TypeError(
f"HyperGeometric.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
)
N = good + bad
        # TODO: Use lower and upper in logcdf for a smarter logsumexp?
safe_lower = at.switch(at.lt(value, 0), value, 0)
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, n),
at.logsumexp(
HyperGeometric.logp(at.arange(safe_lower, value + 1), good, bad, n),
keepdims=False,
),
0,
),
)
return check_parameters(
res,
0 < N,
0 <= good,
0 <= n,
good <= N,
n <= N,
msg="N > 0, 0 <= good <= N, 0 <= n <= N",
)
class DiscreteUniformRV(RandomVariable):
name = "discrete_uniform"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "int64"
_print_name = ("DiscreteUniform", "\\operatorname{DiscreteUniform}")
@classmethod
def rng_fn(cls, rng, lower, upper, size=None):
return stats.randint.rvs(lower, upper + 1, size=size, random_state=rng)
discrete_uniform = DiscreteUniformRV()
class DiscreteUniform(Discrete):
R"""
Discrete uniform distribution.
The pmf of this distribution is
.. math:: f(x \mid lower, upper) = \frac{1}{upper-lower+1}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
ls = [1, -2]
us = [6, 2]
for l, u in zip(ls, us):
x = np.arange(l, u+1)
pmf = [1.0 / (u - l + 1)] * len(x)
plt.plot(x, pmf, '-o', label='lower = {}, upper = {}'.format(l, u))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0, 0.4)
plt.legend(loc=1)
plt.show()
======== ===============================================
Support :math:`x \in {lower, lower + 1, \ldots, upper}`
Mean :math:`\dfrac{lower + upper}{2}`
Variance :math:`\dfrac{(upper - lower)^2}{12}`
======== ===============================================
Parameters
----------
lower: int
Lower limit.
upper: int
Upper limit (upper > lower).
"""
rv_op = discrete_uniform
@classmethod
def dist(cls, lower, upper, *args, **kwargs):
lower = intX(at.floor(lower))
upper = intX(at.floor(upper))
return super().dist([lower, upper], **kwargs)
def get_moment(rv, size, lower, upper):
mode = at.maximum(at.floor((upper + lower) / 2.0), lower)
if not rv_size_is_none(size):
mode = at.full(size, mode)
return mode
def logp(value, lower, upper):
r"""
Calculate log-probability of DiscreteUniform distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, lower), at.gt(value, upper)),
-np.inf,
at.fill(value, -at.log(upper - lower + 1)),
)
return check_parameters(res, lower <= upper, msg="lower <= upper")
def logcdf(value, lower, upper):
"""
Compute the log of the cumulative distribution function for Discrete uniform distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.le(value, lower),
-np.inf,
at.switch(
at.lt(value, upper),
at.log(at.minimum(at.floor(value), upper) - lower + 1) - at.log(upper - lower + 1),
0,
),
)
return check_parameters(res, lower <= upper, msg="lower <= upper")
class Categorical(Discrete):
R"""
Categorical log-likelihood.
The most general discrete distribution. The pmf of this distribution is
.. math:: f(x \mid p) = p_x
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
ps = [[0.1, 0.6, 0.3], [0.3, 0.1, 0.1, 0.5]]
for p in ps:
x = range(len(p))
plt.plot(x, p, '-o', label='p = {}'.format(p))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=1)
plt.show()
======== ===================================
Support :math:`x \in \{0, 1, \ldots, |p|-1\}`
======== ===================================
Parameters
----------
p: array of floats
p > 0 and the elements of p must sum to 1. They will be automatically
rescaled otherwise.
"""
rv_op = categorical
@classmethod
def dist(cls, p, **kwargs):
p = at.as_tensor_variable(floatX(p))
return super().dist([p], **kwargs)
def get_moment(rv, size, p):
mode = at.argmax(p, axis=-1)
if not rv_size_is_none(size):
mode = at.full(size, mode)
return mode
def logp(value, p):
r"""
Calculate log-probability of Categorical distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or `TensorVariable`
"""
k = at.shape(p)[-1]
p_ = p
p = p_ / at.sum(p_, axis=-1, keepdims=True)
value_clip = at.clip(value, 0, k - 1)
if p.ndim > 1:
if p.ndim > value_clip.ndim:
value_clip = at.shape_padleft(value_clip, p_.ndim - value_clip.ndim)
elif p.ndim < value_clip.ndim:
p = at.shape_padleft(p, value_clip.ndim - p_.ndim)
pattern = (p.ndim - 1,) + tuple(range(p.ndim - 1))
a = at.log(
take_along_axis(
p.dimshuffle(pattern),
value_clip,
)
)
else:
a = at.log(p[value_clip])
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, k - 1)),
-np.inf,
a,
)
return check_parameters(
res, at.all(p_ >= 0, axis=-1), at.all(p <= 1, axis=-1), msg="0 <= p <=1"
)
class ConstantRV(RandomVariable):
name = "constant"
ndim_supp = 0
ndims_params = [0]
dtype = "floatX" # Should be treated as a discrete variable!
_print_name = ("Constant", "\\operatorname{Constant}")
@classmethod
def rng_fn(cls, rng, c, size=None):
if size is None:
return c.copy()
return np.full(size, c)
constant = ConstantRV()
class Constant(Discrete):
r"""
Constant log-likelihood.
Parameters
----------
value: float or int
Constant parameter.
"""
rv_op = constant
@classmethod
def dist(cls, c, *args, **kwargs):
c = at.as_tensor_variable(floatX(c))
return super().dist([c], **kwargs)
def get_moment(rv, size, c):
if not rv_size_is_none(size):
c = at.full(size, c)
return c
def logp(value, c):
r"""
Calculate log-probability of Constant distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
return at.switch(
at.eq(value, c),
at.zeros_like(value),
-np.inf,
)
class ZeroInflatedPoissonRV(RandomVariable):
name = "zero_inflated_poisson"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "int64"
_print_name = ("ZeroInflatedPois", "\\operatorname{ZeroInflatedPois}")
@classmethod
def rng_fn(cls, rng, psi, lam, size):
return rng.poisson(lam, size=size) * (rng.random(size=size) < psi)
zero_inflated_poisson = ZeroInflatedPoissonRV()
class ZeroInflatedPoisson(Discrete):
R"""
Zero-inflated Poisson log-likelihood.
Often used to model the number of events occurring in a fixed period
of time when the times at which events occur are independent.
The pmf of this distribution is
.. math::
f(x \mid \psi, \theta) = \left\{ \begin{array}{l}
(1-\psi) + \psi e^{-\theta}, \text{if } x = 0 \\
\psi \frac{e^{-\theta}\theta^x}{x!}, \text{if } x=1,2,3,\ldots
\end{array} \right.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 22)
psis = [0.7, 0.4]
thetas = [8, 4]
for psi, theta in zip(psis, thetas):
pmf = st.poisson.pmf(x, theta)
pmf[0] = (1 - psi) + pmf[0]
pmf[1:] = psi * pmf[1:]
pmf /= pmf.sum()
plt.plot(x, pmf, '-o', label='$\\psi$ = {}, $\\theta$ = {}'.format(psi, theta))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi\theta`
Variance :math:`\theta + \frac{1-\psi}{\psi}\theta^2`
======== ==========================
Parameters
----------
psi: float
Expected proportion of Poisson variates (0 < psi < 1)
theta: float
Expected number of occurrences during the given interval
(theta >= 0).
"""
rv_op = zero_inflated_poisson
@classmethod
def dist(cls, psi, theta, *args, **kwargs):
psi = at.as_tensor_variable(floatX(psi))
theta = at.as_tensor_variable(floatX(theta))
return super().dist([psi, theta], *args, **kwargs)
def get_moment(rv, size, psi, theta):
mean = at.floor(psi * theta)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, psi, theta):
r"""
Calculate log-probability of ZeroInflatedPoisson distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.gt(value, 0),
at.log(psi) + logp(Poisson.dist(mu=theta), value),
at.logaddexp(at.log1p(-psi), at.log(psi) - theta),
)
res = at.switch(at.lt(value, 0), -np.inf, res)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 <= theta,
msg="0 <= psi <= 1, theta >= 0",
)
def logcdf(value, psi, theta):
"""
Compute the log of the cumulative distribution function for ZeroInflatedPoisson distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.logaddexp(
at.log1p(-psi),
at.log(psi) + logcdf(Poisson.dist(mu=theta), value),
),
)
return check_parameters(
res, 0 <= psi, psi <= 1, 0 <= theta, msg="0 <= psi <= 1, theta >= 0"
)
class ZeroInflatedBinomialRV(RandomVariable):
name = "zero_inflated_binomial"
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = "int64"
_print_name = ("ZeroInflatedBinom", "\\operatorname{ZeroInflatedBinom}")
@classmethod
def rng_fn(cls, rng, psi, n, p, size):
return rng.binomial(n=n, p=p, size=size) * (rng.random(size=size) < psi)
zero_inflated_binomial = ZeroInflatedBinomialRV()
class ZeroInflatedBinomial(Discrete):
R"""
Zero-inflated Binomial log-likelihood.
The pmf of this distribution is
.. math::
f(x \mid \psi, n, p) = \left\{ \begin{array}{l}
(1-\psi) + \psi (1-p)^{n}, \text{if } x = 0 \\
\psi {n \choose x} p^x (1-p)^{n-x}, \text{if } x=1,2,3,\ldots,n
\end{array} \right.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 25)
ns = [10, 20]
ps = [0.5, 0.7]
psis = [0.7, 0.4]
for n, p, psi in zip(ns, ps, psis):
pmf = st.binom.pmf(x, n, p)
pmf[0] = (1 - psi) + pmf[0]
pmf[1:] = psi * pmf[1:]
pmf /= pmf.sum()
plt.plot(x, pmf, '-o', label='n = {}, p = {}, $\\psi$ = {}'.format(n, p, psi))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi n p`
Variance :math:`(1-\psi) n p [1 - p(1 - \psi n)].`
======== ==========================
Parameters
----------
psi: float
Expected proportion of Binomial variates (0 < psi < 1)
n: int
Number of Bernoulli trials (n >= 0).
p: float
Probability of success in each trial (0 < p < 1).
"""
rv_op = zero_inflated_binomial
@classmethod
def dist(cls, psi, n, p, *args, **kwargs):
psi = at.as_tensor_variable(floatX(psi))
n = at.as_tensor_variable(intX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([psi, n, p], *args, **kwargs)
def get_moment(rv, size, psi, n, p):
mean = at.round(psi * n * p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, psi, n, p):
r"""
Calculate log-probability of ZeroInflatedBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.gt(value, 0),
at.log(psi) + logp(Binomial.dist(n=n, p=p), value),
at.logaddexp(at.log1p(-psi), at.log(psi) + n * at.log1p(-p)),
)
res = at.switch(
at.lt(value, 0),
-np.inf,
res,
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 <= p,
p <= 1,
msg="0 <= psi <= 1, 0 <= p <= 1",
)
def logcdf(value, psi, n, p):
"""
Compute the log of the cumulative distribution function for ZeroInflatedBinomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, n)),
-np.inf,
at.logaddexp(
at.log1p(-psi),
at.log(psi) + logcdf(Binomial.dist(n=n, p=p), value),
),
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 <= p,
p <= 1,
msg="0 <= psi <= 1, 0 <= p <= 1",
)
class ZeroInflatedNegBinomialRV(RandomVariable):
name = "zero_inflated_neg_binomial"
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = "int64"
_print_name = (
"ZeroInflatedNegBinom",
"\\operatorname{ZeroInflatedNegBinom}",
)
@classmethod
def rng_fn(cls, rng, psi, n, p, size):
return rng.negative_binomial(n=n, p=p, size=size) * (rng.random(size=size) < psi)
zero_inflated_neg_binomial = ZeroInflatedNegBinomialRV()
class ZeroInflatedNegativeBinomial(Discrete):
R"""
Zero-Inflated Negative binomial log-likelihood.
The Zero-inflated version of the Negative Binomial (NB).
The NB distribution describes a Poisson random variable
whose rate parameter is gamma distributed.
The pmf of this distribution is
.. math::
f(x \mid \psi, \mu, \alpha) = \left\{
\begin{array}{l}
(1-\psi) + \psi \left (
\frac{\alpha}{\alpha+\mu}
\right) ^\alpha, \text{if } x = 0 \\
\psi \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} \left (
\frac{\alpha}{\mu+\alpha}
\right)^\alpha \left(
\frac{\mu}{\mu+\alpha}
\right)^x, \text{if } x=1,2,3,\ldots
\end{array}
\right.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def ZeroInfNegBinom(a, m, psi, x):
pmf = special.binom(x + a - 1, x) * (a / (m + a))**a * (m / (m + a))**x
pmf[0] = (1 - psi) + pmf[0]
pmf[1:] = psi * pmf[1:]
pmf /= pmf.sum()
return pmf
x = np.arange(0, 25)
alphas = [2, 4]
mus = [2, 8]
psis = [0.7, 0.7]
for a, m, psi in zip(alphas, mus, psis):
pmf = ZeroInfNegBinom(a, m, psi, x)
plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\mu$ = {}, $\psi$ = {}'.format(a, m, psi))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi\mu`
    Var      :math:`\psi\mu \left(1 + \frac{\mu}{\alpha} + \mu (1 - \psi)\right)`
======== ==========================
The zero inflated negative binomial distribution can be parametrized
either in terms of mu or p, and either in terms of alpha or n.
The link between the parametrizations is given by
.. math::
\mu &= \frac{n(1-p)}{p} \\
\alpha &= n
Parameters
----------
psi: float
Expected proportion of NegativeBinomial variates (0 < psi < 1)
mu: float
        Poisson distribution parameter (mu > 0).
alpha: float
Gamma distribution parameter (alpha > 0).
p: float
Alternative probability of success in each trial (0 < p < 1).
n: float
Alternative number of target success trials (n > 0)
"""
rv_op = zero_inflated_neg_binomial
@classmethod
def dist(cls, psi, mu=None, alpha=None, p=None, n=None, *args, **kwargs):
psi = at.as_tensor_variable(floatX(psi))
n, p = NegativeBinomial.get_n_p(mu=mu, alpha=alpha, p=p, n=n)
n = at.as_tensor_variable(floatX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([psi, n, p], *args, **kwargs)
def get_moment(rv, size, psi, n, p):
mean = at.floor(psi * n * (1 - p) / p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, psi, n, p):
r"""
Calculate log-probability of ZeroInflatedNegativeBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.gt(value, 0),
at.log(psi) + logp(NegativeBinomial.dist(n=n, p=p), value),
at.logaddexp(at.log1p(-psi), at.log(psi) + n * at.log(p)),
)
res = at.switch(
at.lt(value, 0),
-np.inf,
res,
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 < n,
0 <= p,
p <= 1,
msg="0 <= psi <= 1, n > 0, 0 <= p <= 1",
)
def logcdf(value, psi, n, p):
"""
Compute the log of the cumulative distribution function for ZeroInflatedNegativeBinomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.logaddexp(
at.log1p(-psi),
at.log(psi) + logcdf(NegativeBinomial.dist(n=n, p=p), value),
),
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 < n,
0 < p,
p <= 1,
msg="0 <= psi <= 1, n > 0, 0 < p <= 1",
)
class _OrderedLogistic(Categorical):
r"""
Underlying class for ordered logistic distributions.
See docs for the OrderedLogistic wrapper class for more details on how to use it in models.
"""
rv_op = categorical
@classmethod
def dist(cls, eta, cutpoints, *args, **kwargs):
eta = at.as_tensor_variable(floatX(eta))
cutpoints = at.as_tensor_variable(cutpoints)
pa = sigmoid(cutpoints - at.shape_padright(eta))
p_cum = at.concatenate(
[
at.zeros_like(at.shape_padright(pa[..., 0])),
pa,
at.ones_like(at.shape_padright(pa[..., 0])),
],
axis=-1,
)
p = p_cum[..., 1:] - p_cum[..., :-1]
return super().dist(p, *args, **kwargs)
class OrderedLogistic:
R"""
Wrapper class for Ordered Logistic distributions.
Useful for regression on ordinal data values whose values range
from 1 to K as a function of some predictor, :math:`\eta`. The
cutpoints, :math:`c`, separate which ranges of :math:`\eta` are
mapped to which of the K observed dependent variables. The number
of cutpoints is K - 1. It is recommended that the cutpoints are
constrained to be ordered.
.. math::
f(k \mid \eta, c) = \left\{
\begin{array}{l}
1 - \text{logit}^{-1}(\eta - c_1)
\,, \text{if } k = 0 \\
\text{logit}^{-1}(\eta - c_{k - 1}) -
\text{logit}^{-1}(\eta - c_{k})
\,, \text{if } 0 < k < K \\
\text{logit}^{-1}(\eta - c_{K - 1})
\,, \text{if } k = K \\
\end{array}
\right.
Parameters
----------
eta: float
The predictor.
cutpoints: array
The length K - 1 array of cutpoints which break :math:`\eta` into
ranges. Do not explicitly set the first and last elements of
:math:`c` to negative and positive infinity.
compute_p: boolean, default True
Whether to compute and store in the trace the inferred probabilities of each categories,
based on the cutpoints' values. Defaults to True.
Might be useful to disable it if memory usage is of interest.
Examples
--------
.. code-block:: python
# Generate data for a simple 1 dimensional example problem
n1_c = 300; n2_c = 300; n3_c = 300
cluster1 = np.random.randn(n1_c) + -1
cluster2 = np.random.randn(n2_c) + 0
cluster3 = np.random.randn(n3_c) + 2
x = np.concatenate((cluster1, cluster2, cluster3))
y = np.concatenate((1*np.ones(n1_c),
2*np.ones(n2_c),
3*np.ones(n3_c))) - 1
# Ordered logistic regression
with pm.Model() as model:
cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
transform=pm.distributions.transforms.ordered)
y_ = pm.OrderedLogistic("y", cutpoints=cutpoints, eta=x, observed=y)
idata = pm.sample()
# Plot the results
plt.hist(cluster1, 30, alpha=0.5);
plt.hist(cluster2, 30, alpha=0.5);
plt.hist(cluster3, 30, alpha=0.5);
posterior = idata.posterior.stack(sample=("chain", "draw"))
plt.hist(posterior["cutpoints"][0], 80, alpha=0.2, color='k');
plt.hist(posterior["cutpoints"][1], 80, alpha=0.2, color='k');
"""
def __new__(cls, name, *args, compute_p=True, **kwargs):
out_rv = _OrderedLogistic(name, *args, **kwargs)
if compute_p:
pm.Deterministic(f"{name}_probs", out_rv.owner.inputs[3], dims=kwargs.get("dims"))
return out_rv
@classmethod
def dist(cls, *args, **kwargs):
return _OrderedLogistic.dist(*args, **kwargs)
class _OrderedProbit(Categorical):
r"""
Underlying class for ordered probit distributions.
See docs for the OrderedProbit wrapper class for more details on how to use it in models.
"""
rv_op = categorical
@classmethod
def dist(cls, eta, cutpoints, sigma=1, *args, **kwargs):
eta = at.as_tensor_variable(floatX(eta))
cutpoints = at.as_tensor_variable(cutpoints)
probits = at.shape_padright(eta) - cutpoints
_log_p = at.concatenate(
[
at.shape_padright(normal_lccdf(0, sigma, probits[..., 0])),
log_diff_normal_cdf(0, sigma, probits[..., :-1], probits[..., 1:]),
at.shape_padright(normal_lcdf(0, sigma, probits[..., -1])),
],
axis=-1,
)
_log_p = at.as_tensor_variable(floatX(_log_p))
p = at.exp(_log_p)
return super().dist(p, *args, **kwargs)
class OrderedProbit:
R"""
Wrapper class for Ordered Probit distributions.
Useful for regression on ordinal data values whose values range
from 1 to K as a function of some predictor, :math:`\eta`. The
cutpoints, :math:`c`, separate which ranges of :math:`\eta` are
mapped to which of the K observed dependent variables. The number
of cutpoints is K - 1. It is recommended that the cutpoints are
constrained to be ordered.
In order to stabilize the computation, log-likelihood is computed
in log space using the scaled error function `erfcx`.
.. math::
f(k \mid \eta, c) = \left\{
\begin{array}{l}
1 - \text{normal_cdf}(0, \sigma, \eta - c_1)
\,, \text{if } k = 0 \\
\text{normal_cdf}(0, \sigma, \eta - c_{k - 1}) -
\text{normal_cdf}(0, \sigma, \eta - c_{k})
\,, \text{if } 0 < k < K \\
\text{normal_cdf}(0, \sigma, \eta - c_{K - 1})
\,, \text{if } k = K \\
\end{array}
\right.
Parameters
----------
eta: float
The predictor.
cutpoints: array
The length K - 1 array of cutpoints which break :math:`\eta` into
ranges. Do not explicitly set the first and last elements of
:math:`c` to negative and positive infinity.
sigma: float, default 1.0
Standard deviation of the probit function.
compute_p: boolean, default True
Whether to compute and store in the trace the inferred probabilities of each categories,
based on the cutpoints' values. Defaults to True.
Might be useful to disable it if memory usage is of interest.
Example
--------
.. code:: python
# Generate data for a simple 1 dimensional example problem
n1_c = 300; n2_c = 300; n3_c = 300
cluster1 = np.random.randn(n1_c) + -1
cluster2 = np.random.randn(n2_c) + 0
cluster3 = np.random.randn(n3_c) + 2
x = np.concatenate((cluster1, cluster2, cluster3))
y = np.concatenate((1*np.ones(n1_c),
2*np.ones(n2_c),
3*np.ones(n3_c))) - 1
# Ordered probit regression
with pm.Model() as model:
cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
transform=pm.distributions.transforms.ordered)
y_ = pm.OrderedProbit("y", cutpoints=cutpoints, eta=x, observed=y)
idata = pm.sample()
# Plot the results
plt.hist(cluster1, 30, alpha=0.5);
plt.hist(cluster2, 30, alpha=0.5);
plt.hist(cluster3, 30, alpha=0.5);
posterior = idata.posterior.stack(sample=("chain", "draw"))
plt.hist(posterior["cutpoints"][0], 80, alpha=0.2, color='k');
plt.hist(posterior["cutpoints"][1], 80, alpha=0.2, color='k');
"""
def __new__(cls, name, *args, compute_p=True, **kwargs):
out_rv = _OrderedProbit(name, *args, **kwargs)
if compute_p:
pm.Deterministic(f"{name}_probs", out_rv.owner.inputs[3], dims=kwargs.get("dims"))
return out_rv
@classmethod
def dist(cls, *args, **kwargs):
return _OrderedProbit.dist(*args, **kwargs)
|
the-stack_0_9847 | #!/usr/bin/env python3
# Complete the maxSubsetSum function below.
def maxSubsetSum(arr):
if len(arr) == 0: return 0
if len(arr) == 1: return arr[0]
arr[0] = max(0, arr[0])
arr[1] = max(arr[0], arr[1])
for i in range(2, len(arr)):
arr[i] = max(arr[i - 1], arr[i] + arr[i - 2])
return arr[-1]
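# Worked example of the in-place DP above for arr = [3, 7, 4, 6, 5]:
#   arr[0] = max(0, 3) = 3
#   arr[1] = max(3, 7) = 7
#   i = 2: max(7, 4 + 3)  = 7
#   i = 3: max(7, 6 + 7)  = 13
#   i = 4: max(13, 5 + 7) = 13
# so the maximum sum of non-adjacent elements is 13 (7 + 6).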
if __name__ == "__main__":
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = maxSubsetSum(arr)
print(res)
|
the-stack_0_9849 | lista = []
jogador = dict()
golslist = []
total = 0
while True:
jogador.clear()
    golslist.clear()
    total = 0
    jogador['nome'] = str(input('Player name: '))
    jogador['partidas'] = int(input(f'How many matches did {jogador["nome"]} play? '))
for i in range(0, jogador['partidas']):
        gol = int(input(f'How many goals in match {i}? '))
golslist.append(gol)
total += gol
jogador['gols'] = golslist[:]
jogador['total'] = total
lista.append(jogador.copy())
    continuar = str(input('Keep going? [Y/N] '))
if continuar in 'Nn':
break
print('-='*20)
for i in jogador.keys():
print(f'{i:<12}', end='')
print()
print('-'*40)
for k, v in enumerate(lista):
print(f'{k:<4} ', end='')
for d in v.values():
print(f'{str(d):<10}', end='')
print()
while True:
    a = int(input('Show data for which player? '))
if a == 999:
        print('<< COME BACK SOON >>')
break
elif a >= len(lista):
        print('ERROR, enter a valid number')
else:
        print(f'-- STATS FOR PLAYER {lista[a]["nome"]}.')
for p in range(0, lista[a]['partidas']):
            print(f'In game {p} scored {lista[a]["gols"][p]} goals.')
print('-'*30)
|
the-stack_0_9852 | import curses
import time
from sudoku import Sudoku
menu = ["Rules", "Play", "Exit"]
submenu = ["Easy", "Hard", "Exit"]
def intro_message():
welcome_message = """
Welcome to Sudoku
Rules:
    All rows should have the digits 1-9, without repetition.
    All columns should have the digits 1-9, without repetition.
    All 9 sub-matrices should have the digits 1-9, without repetition.
    To play, enter the row, column, and answer at the command prompt. The
    format is: <row> <column> <value>
Type exit to leave
Please note this game uses 0 indexing
Good luck!\n
"""
return welcome_message
def print_subject(stdscr, w, text):
text_x = w // 2 - len(text) // 2
stdscr.addstr(5, text_x, text)
def print_menu(stdscr, curr_row, curr_menu, text):
stdscr.clear()
h, w = stdscr.getmaxyx()
title_x = w // 2 - len(text) // 2
stdscr.addstr(5, title_x, text)
for idx, row in enumerate(curr_menu):
x = w // 2 - len(row) // 2
y = h // 2 - len(menu) // 2 + idx
if idx == curr_row:
stdscr.attron(curses.color_pair(1))
stdscr.addstr(y, x, row)
stdscr.attroff(curses.color_pair(1))
else:
stdscr.addstr(y, x, row)
stdscr.refresh()
def print_center(stdscr, text):
stdscr.clear()
h, w = stdscr.getmaxyx()
x = w // 2 - len(text) // 2
y = h // 2
stdscr.addstr(y, x, text)
stdscr.refresh()
def sub_menu(stdscr):
submenu_row = 0
print_menu(stdscr, submenu_row, submenu, "Pick a Difficulty")
while True:
sub_key = stdscr.getch()
if sub_key == curses.KEY_UP and submenu_row > 0:
submenu_row -= 1
elif sub_key == curses.KEY_DOWN and submenu_row < len(submenu) - 1:
submenu_row += 1
        if sub_key in [10, 13]:
if submenu[submenu_row] == "Easy":
print_center(stdscr, "'{}' selected".format(submenu[submenu_row]))
start_game(submenu[submenu_row])
elif submenu[submenu_row] == "Hard":
print_center(stdscr, "'{}' selected".format(submenu[submenu_row]))
start_game(submenu[submenu_row])
elif submenu[submenu_row] == "Exit":
print_center(stdscr, "'{}' selected".format(submenu[submenu_row]))
return
print_menu(stdscr, submenu_row, submenu, "Pick a Difficulty")
def start_game(difficulty):
time.sleep(1)
curses.nocbreak()
curses.echo()
curses.endwin()
Sudoku.run(difficulty)
def main(stdscr):
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
current_row = 0
print_menu(stdscr, current_row, menu, "Sudoku!")
while True:
key = stdscr.getch()
if key == curses.KEY_UP and current_row > 0:
current_row -= 1
elif key == curses.KEY_DOWN and current_row < len(menu) - 1:
current_row += 1
        elif key in [10, 13]:
if menu[current_row] == "Rules":
stdscr.addstr(5, 5, intro_message())
stdscr.getch()
elif menu[current_row] == "Play":
sub_menu(stdscr)
elif menu[current_row] != "Exit":
print_center(stdscr, "'{}' selected".format(menu[current_row]))
stdscr.getch()
if current_row == len(menu) - 1:
break
print_menu(stdscr, current_row, menu, "Sudoku!")
if __name__ == "__main__":
curses.wrapper(main)
|
the-stack_0_9853 | from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from guardian.shortcuts import assign_perm, remove_perm
from grandchallenge.cases.models import Image
from grandchallenge.reader_studies.models import Answer, ReaderStudy
from grandchallenge.reader_studies.tasks import add_scores
@receiver(m2m_changed, sender=ReaderStudy.images.through)
def update_image_permissions(instance, action, reverse, model, pk_set, **_):
"""
Assign or remove view permissions to the readers group when images
    are added or removed to/from the reader study images. Handles reverse
relations and clearing.
"""
if action not in ["post_add", "post_remove", "pre_clear"]:
# nothing to do for the other actions
return
if reverse:
images = Image.objects.filter(pk=instance.pk)
if pk_set is None:
# When using a _clear action, pk_set is None
# https://docs.djangoproject.com/en/2.2/ref/signals/#m2m-changed
reader_studies = instance.readerstudies.all()
else:
reader_studies = model.objects.filter(pk__in=pk_set)
reader_studies = reader_studies.select_related("readers_group")
else:
reader_studies = [instance]
if pk_set is None:
# When using a _clear action, pk_set is None
# https://docs.djangoproject.com/en/2.2/ref/signals/#m2m-changed
images = instance.images.all()
else:
images = model.objects.filter(pk__in=pk_set)
op = assign_perm if "add" in action else remove_perm
for rs in reader_studies:
op("view_image", rs.readers_group, images)
@receiver(m2m_changed, sender=Answer.images.through)
def assign_score(instance, action, reverse, model, pk_set, **_):
if action != "post_add":
return
add_scores.apply_async(
kwargs={
"instance_pk": str(instance.pk),
"pk_set": list(map(str, pk_set)),
}
)
|
the-stack_0_9854 | import os
import yaml
import collections.abc
import logging
log = logging.getLogger(__name__)
# Recursive dictionary merge
# Copyright (C) 2016 Paul Durivage <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def dict_merge(dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts
nested to an arbitrary depth, updating keys. The ``merge_dct`` is
merged into ``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
if (
k in dct
and isinstance(dct[k], dict)
            and isinstance(merge_dct[k], collections.abc.Mapping)
):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
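# Example: nested keys are merged rather than overwritten wholesale.
#   base = {"db": {"host": "localhost", "port": 5432}}
#   override = {"db": {"port": 5433}, "debug": True}
#   dict_merge(base, override)
#   # base -> {"db": {"host": "localhost", "port": 5433}, "debug": True}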
class ConfigClass(dict):
""" This wrapper class allows easy loading and overloading variables of our
configuration
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
    def load_yaml(self, *args):
        with open(os.path.join(*args)) as o:
            d = yaml.load(o.read(), Loader=yaml.SafeLoader)
        dict_merge(self, d)
# load defaults
config = ConfigClass()
config.load_yaml(os.path.dirname(os.path.realpath(__file__)), "config-defaults.yaml")
try:
config.load_yaml(os.getcwd(), "config.yaml")
except Exception: # pragma: no cover
log.info("No config.yaml found in root directory! Using defaults ...")
|
the-stack_0_9855 | """."""
import os
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
SCRIPT_ABS_DIR = os.sys.path[0]
os.sys.path.append(os.path.join(SCRIPT_ABS_DIR, '../../config'))
import my_py_picture as config # noqa
figure_width = config.MAX_FULL_PAGE_WIDTH * 0.8
FIGSIZE = (figure_width, figure_width * 0.3)
DPI = config.DPI_GRAPH
LAYOUT = dict(pad=0, h_pad=0, w_pad=2, rect=(0, 0, 1, 1))
PLOTS_WIDTH_RATIOS = [0.6, 1]
LABEL_OPTIONS = dict(config.LABEL_OPTIONS, x=0.03, y=1)
X_LABEL = 'Cuteness'
Y_LABEL = 'Excitement'
def main():
"""."""
x_a = np.linspace(0, 10, num=10)
y_a = [0] + [1] * 9
x_b = np.linspace(0, 10, num=10)
y_b = np.linspace(0, 10, num=10)
fig = plt.figure(figsize=FIGSIZE, dpi=DPI)
gs = gridspec.GridSpec(1, 2, width_ratios=PLOTS_WIDTH_RATIOS)
ax_a = fig.add_subplot(gs[0])
ax_a.plot(x_a, y_a)
ax_a.set_xlabel(X_LABEL)
ax_a.set_ylabel(Y_LABEL)
ax_a.text(s=r'\textbf{a}', transform=ax_a.transAxes, **LABEL_OPTIONS)
ax_b = fig.add_subplot(gs[1])
ax_b.plot(x_b, y_b)
ax_b.set_xlabel(X_LABEL)
ax_b.set_ylabel(Y_LABEL)
ax_b.text(s=r'\textbf{b}', transform=ax_b.transAxes, **LABEL_OPTIONS)
fig.tight_layout(**LAYOUT)
fig.savefig(config.get_figure_file_path(os.sys.argv[0]))
plt.close(fig)
if __name__ == '__main__':
main()
|
the-stack_0_9856 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
from gbpservice.common import utils
from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.nfp.core import log as nfp_logging
import netaddr
from neutron._i18n import _LE
from neutron.api.v2 import attributes as attr
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.db import l3_db
from neutron.db.l3_db import DEVICE_OWNER_ROUTER_INTF
from neutron.db.l3_db import EXTERNAL_GW_INFO
from neutron.db.l3_db import RouterPort
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.plugins.common import constants as n_const
import neutron_fwaas.extensions
from neutron_fwaas.services.firewall import fwaas_plugin as ref_fw_plugin
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import orm
from neutron_fwaas.db.firewall import firewall_db as n_firewall
LOG = nfp_logging.getLogger(__name__)
class NFPFirewallPlugin(ref_fw_plugin.FirewallPlugin):
def __init__(self):
# Monkey patch L3 agent topic
# The L3 agent is where the reference firewall agent runs;
# patch that topic to the NFP firewall agent's topic name
ref_fw_plugin.f_const.L3_AGENT = topics.FW_NFP_CONFIGAGENT_TOPIC
#n_topics.L3_AGENT = topics.FW_NFP_CONFIGAGENT_TOPIC
# Ensure neutron fwaas extensions are loaded
ext_path = neutron_fwaas.extensions.__path__[0]
if ext_path not in cfg.CONF.api_extensions_path.split(':'):
cfg.CONF.set_override(
'api_extensions_path',
cfg.CONF.api_extensions_path + ':' + ext_path)
super(NFPFirewallPlugin, self).__init__()
# Modifying following plugin function, to relax same router validation
def _get_routers_for_create_firewall(self, tenant_id, context, firewall):
# pop router_id as this goes in the router association db
# and not firewall db
router_ids = firewall['firewall'].pop('router_ids', None)
if router_ids == attr.ATTR_NOT_SPECIFIED:
return tenant_id
def set_routers_for_firewall(self, context, fw):
"""Sets the routers associated with the fw."""
pass
def get_firewall_routers(self, context, fwid):
"""Gets all routers associated with a firewall."""
fw_rtrs = ['1234567890']
return fw_rtrs
def validate_firewall_routers_not_in_use(
self, context, router_ids, fwid=None):
"""Validate if router-ids not associated with any firewall.
If any of the router-ids in the list is already associated with
a firewall, raise an exception else just return.
"""
pass
def update_firewall_routers(self, context, fw):
"""Update the firewall with new routers.
This involves removing existing router associations and replacing
them with the new router associations provided in the update method.
"""
return fw
# Monkey patching the create_firewall db method
def create_firewall(self, context, firewall, status=None):
fw = firewall['firewall']
tenant_id = fw['tenant_id']
# distributed routers may require a more complex state machine;
# the introduction of a new 'CREATED' state allows this, whilst
# keeping a backward compatible behavior of the logical resource.
if not status:
status = n_const.PENDING_CREATE
with context.session.begin(subtransactions=True):
self._validate_fw_parameters(context, fw, tenant_id)
firewall_db = n_firewall.Firewall(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=fw['name'],
description=fw['description'],
firewall_policy_id=fw['firewall_policy_id'],
admin_state_up=fw['admin_state_up'],
status=status)
context.session.add(firewall_db)
return self._make_firewall_dict(firewall_db)
n_firewall.Firewall_db_mixin.create_firewall = create_firewall
# Monkey patching l3_db's _get_router_for_floatingip method to associate
# floatingip if corresponding routes is present.
def _is_net_reachable_from_net(self, context, tenant_id, from_net_id,
to_net_id):
"""Check whether a network is reachable.
Follow the paths of networks connected by devices, to determine
whether a network is reachable from another.
@param context: neutron api request context
@param tenant_id: the owning tenant
@param from_net_id: the source network for the search
@param to_net_id: the destination network for the search
@return: True or False whether a path exists
"""
original_context = context
context = elevate_context(context)
tenant_id = context.tenant_id
def nexthop_nets_query(nets, visited):
"""query networks connected to devices on nets but not visited."""
Port = models_v2.Port
devices_on_nets = context.session.query(Port.device_id).filter(
Port.tenant_id == tenant_id,
Port.device_owner.notin_([l3_constants.DEVICE_OWNER_DHCP]),
Port.network_id.in_(nets)).subquery()
return context.session.query(Port.network_id).filter(
Port.tenant_id == tenant_id,
Port.network_id.notin_(visited),
Port.device_id.in_(devices_on_nets))
visited = set([])
nets = set([from_net_id])
while nets:
if to_net_id in nets:
context = original_context
return True
visited |= nets
nets = set((tup[0] for tup in nexthop_nets_query(nets, visited)))
context = original_context
return False
def _find_net_for_nexthop(self, context, tenant_id, router_id, nexthop):
"""Find the network to which the nexthop belongs.
Iterate over the router interfaces to find the network of nexthop.
@param context: neutron api request context
@param tenant_id: the owning tenant
@param router_id: a router id
@param nexthop: an IP address
@return: the network id of the nexthop or None if not found
"""
interfaces = context.session.query(models_v2.Port).filter_by(
tenant_id=tenant_id,
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_INTF)
for interface in interfaces:
cidrs = [self._core_plugin._get_subnet(context,
ip['subnet_id'])['cidr']
for ip in interface['fixed_ips']]
if netaddr.all_matching_cidrs(nexthop, cidrs):
return interface['network_id']
def _find_routers_via_routes_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
"""Find routers with route to the internal IP address.
Iterate over the routers that belong to the same tenant as
'internal_port'. For each router check that the router is connected
to the external network and whether there is a route to the internal
IP address. Consider only routers for which there is a path from the
nexthop of the route to the internal port.
Sort the list of routers to have the router with the most specific
route first (largest CIDR prefix mask length).
@param context: neutron api request context
@param internal_port: the port dict for the association
@param internal_subnet_id: the subnet for the association
@param external_network_id: the network of the floatingip
@return: a sorted list of matching routers
"""
original_context = context
context = elevate_context(context)
internal_ip_address = [
ip['ip_address'] for ip in internal_port['fixed_ips']
if ip['subnet_id'] == internal_subnet_id
][0]
# find the tenant routers
tenant_id = internal_port['tenant_id']
routers = self.get_routers(context, filters={'tenant_id': [tenant_id]})
prefix_routers = []
for router in routers:
# verify that the router is on "external_network"
gw_info = router.get(EXTERNAL_GW_INFO)
if not gw_info or gw_info['network_id'] != external_network_id:
continue
# find a matching route
if 'routes' not in router:
continue
cidr_nexthops = {}
for route in router['routes']:
cidr = netaddr.IPNetwork(route['destination'])
if cidr not in cidr_nexthops:
cidr_nexthops[cidr] = []
cidr_nexthops[cidr].append(route['nexthop'])
smallest_cidr = netaddr.smallest_matching_cidr(
internal_ip_address,
cidr_nexthops.keys())
if not smallest_cidr:
continue
# validate that there exists a path to "internal_port"
for nexthop in cidr_nexthops[smallest_cidr]:
net_id = self._find_net_for_nexthop(context, context.tenant_id,
router['id'], nexthop)
if net_id and self._is_net_reachable_from_net(
context,
context.tenant_id,
net_id,
internal_port['network_id']):
prefix_routers.append(
(smallest_cidr.prefixlen, router['id']))
break
context = original_context
return [p_r[1] for p_r in sorted(prefix_routers, reverse=True)]
def elevate_context(context):
context = context.elevated()
context.tenant_id = _resource_owner_tenant_id()
return context
def _resource_owner_tenant_id():
user, pwd, tenant, auth_url = utils.get_keystone_creds()
keystoneclient = keyclient.Client(username=user, password=pwd,
auth_url=auth_url)
try:
tenant = keystoneclient.tenants.find(name=tenant)
return tenant.id
except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('No tenant with name %s exists.'), tenant)
except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
def _get_router_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
if not subnet['gateway_ip']:
msg = (_('Cannot add floating IP to port on subnet %s '
'which has no gateway_ip') % internal_subnet_id)
raise n_exc.BadRequest(resource='floatingip', msg=msg)
# Find routers(with router_id and interface address) that
# connect given internal subnet and the external network.
# Among them, if the router's interface address matches
# with subnet's gateway-ip, return that router.
# Otherwise return the first router.
gw_port = orm.aliased(models_v2.Port, name="gw_port")
routerport_qry = context.session.query(
RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
models_v2.Port, models_v2.IPAllocation).filter(
models_v2.Port.network_id == internal_port['network_id'],
RouterPort.port_type.in_(l3_constants.ROUTER_INTERFACE_OWNERS),
models_v2.IPAllocation.subnet_id == internal_subnet_id
).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
gw_port.network_id == external_network_id).distinct()
first_router_id = None
for router_id, interface_ip in routerport_qry:
if interface_ip == subnet['gateway_ip']:
return router_id
if not first_router_id:
first_router_id = router_id
if first_router_id:
return first_router_id
router_ids = self._find_routers_via_routes_for_floatingip(
context,
internal_port,
internal_subnet_id,
external_network_id)
if router_ids:
return router_ids[0]
raise l3.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet_id,
external_network_id=external_network_id,
port_id=internal_port['id'])
l3_db.L3_NAT_dbonly_mixin._get_router_for_floatingip = (
_get_router_for_floatingip)
l3_db.L3_NAT_dbonly_mixin._find_routers_via_routes_for_floatingip = (
_find_routers_via_routes_for_floatingip)
l3_db.L3_NAT_dbonly_mixin._find_net_for_nexthop = _find_net_for_nexthop
l3_db.L3_NAT_dbonly_mixin._is_net_reachable_from_net = (
_is_net_reachable_from_net)
|