filename | text
---|---|
the-stack_0_12101 | import json
import sys
from typing import Any, List, Mapping, Tuple
from pydantic.tools import parse_obj_as
from common import MethodAbs, get_metrics
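# Usage sketch (the script and dump file names below are assumptions; any two JSON dumps
# with the same schema work):
#   python compare_metrics.py dump_before.json dump_after.json
# Each dump is expected to map method names to serialized MethodAbs payloads, matching the
# Mapping[str, List[Tuple[str, Any]]] annotation below, e.g. {"Foo.bar": [["unit-0", {...}]]}.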
with open(sys.argv[1], "r") as f:
dump1: Mapping[str, List[Tuple[str, Any]]] = json.load(f)
with open(sys.argv[2], "r") as f:
dump2: Mapping[str, List[Tuple[str, Any]]] = json.load(f)
for m in set(dump1.keys()).intersection(set(dump2.keys())):
m_printed = False
abs1 = dump1[m]
abs2 = dump2[m]
parsed1 = parse_obj_as(MethodAbs, abs1)
parsed2 = parse_obj_as(MethodAbs, abs2)
metrics1 = get_metrics(parsed1)
metrics2 = get_metrics(parsed2)
# assert len(metrics1) == len(metrics2)
if len(metrics1) != len(metrics2):
continue
if metrics1 != metrics2:
for s1, s2 in zip(metrics1, metrics2):
unit = s1["unit"]
# assert s1["unit"] == s2["unit"]
if s1["unit"] != s2["unit"]:
continue
for k in s1["metrics"].keys():
m1 = s1["metrics"][k]
m2 = s2["metrics"][k]
if m1 != m2:
print("- %s: %s != %s" % (k, m1, m2))
if m1 > m2:
if not m_printed:
print(f"===== Method {m} =====")
print(parsed1.body)
print()
m_printed = True
print(f"\t* [hint] method = {m}")
print(f"\t* [hint] unit = {unit}")
print("\t* [values] v1 = %s" % s1["values"][k[:-len("-size")]])
print("\t* [values] v2 = %s" % s2["values"][k[:-len("-size")]])
print() |
the-stack_0_12102 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import yaml
import argparse
from pprint import pprint
from attrdict import AttrDict
import paddle
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.transformers import TransformerModel, InferTransformerModel, CrossEntropyCriterion, position_encoding_init
from paddlenlp.utils.log import logger
from paddlenlp.datasets import DatasetBuilder
from paddlenlp.transformers import ElectraForTokenClassification, ElectraTokenizer
from paddlenlp.data import Stack, Tuple, Pad, Dict
from dataloader import create_dataloader, load_dataset
import paddle.distributed as dist
from paddle.io import DataLoader
import pandas as pd
from sklearn.metrics import classification_report
from functools import partial
def compute_metrics(labels, decodes, lens):
decodes = [x for batch in decodes for x in batch]
lens = [x for batch in lens for x in batch]
labels=[x for batch in labels for x in batch]
outputs = []
nb_correct=0
nb_true=0
val_f1s=[]
label_vals=[0,1,2,3]
y_trues=[]
y_preds=[]
for idx, end in enumerate(lens):
y_true = labels[idx][:end].tolist()
y_pred = [x for x in decodes[idx][:end]]
nb_correct += sum(y_t == y_p for y_t, y_p in zip(y_true, y_pred))
nb_true+=len(y_true)
y_trues.extend(y_true)
y_preds.extend(y_pred)
score = nb_correct / nb_true
# val_f1 = metrics.f1_score(y_trues, y_preds, average='micro', labels=label_vals)
result=classification_report(y_trues, y_preds)
# print(val_f1)
return score,result
def evaluate(model, loss_fct, data_loader, label_num):
model.eval()
pred_list = []
len_list = []
labels_list=[]
for batch in data_loader:
input_ids, token_type_ids, length, labels = batch
logits = model(input_ids, token_type_ids)
loss = loss_fct(logits, labels)
avg_loss = paddle.mean(loss)
pred = paddle.argmax(logits, axis=-1)
pred_list.append(pred.numpy())
len_list.append(length.numpy())
labels_list.append(labels.numpy())
accuracy,result=compute_metrics(labels_list, pred_list, len_list)
print("eval loss: %f, accuracy: %f" % (avg_loss, accuracy))
print(result)
model.train()
# evaluate(model, loss_fct, metric, test_data_loader,label_num)
def do_train(args):
    # global_step is assumed to be tracked on args; start at 0 if the config file does not define it.
    if 'global_step' not in args:
        args.global_step = 0
    last_step = args.num_train_epochs * len(train_data_loader)
tic_train = time.time()
for epoch in range(args.num_train_epochs):
for step, batch in enumerate(train_data_loader):
args.global_step += 1
print('~~~~~~~~~~~~~~~~~~~~~args.global_step',args.global_step)
input_ids, token_type_ids, _, labels = batch
logits = model(input_ids, token_type_ids)
loss = loss_fct(logits, labels)
avg_loss = paddle.mean(loss)
if args.global_step % args.logging_steps == 0:
print("global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (args.global_step, epoch, step, avg_loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
avg_loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if args.global_step % args.save_steps == 0 or args.global_step == last_step:
if paddle.distributed.get_rank() == 0:
evaluate(model, loss_fct, test_data_loader, label_num)
paddle.save(model.state_dict(),os.path.join(args.output_dir,
"model_%d.pdparams" % args.global_step))
# Model training
if __name__ == '__main__':
    # Read in the configuration parameters
yaml_file = './electra.base.yaml'
with open(yaml_file, 'rt') as f:
args = AttrDict(yaml.safe_load(f))
# pprint(args)
    paddle.set_device(args.device)  # to use a GPU, install paddlepaddle-gpu accordingly
train_data_loader, test_data_loader = create_dataloader(args)
    # Load the dataset
# Create dataset, tokenizer and dataloader.
train_ds, test_ds = load_dataset('TEDTalk', splits=('train', 'test'), lazy=False)
label_list = train_ds.label_list
label_num = len(label_list)
    # Load the pretrained model
    # Define the model network and its loss
model = ElectraForTokenClassification.from_pretrained(args.model_name_or_path, num_classes= label_num)
    # Set up the AdamW optimizer
num_training_steps = args.max_steps if args.max_steps > 0 else len(
train_data_loader) * args.num_train_epochs
lr_scheduler = LinearDecayWithWarmup(float(args.learning_rate), num_training_steps, args.warmup_steps)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=float(args.adam_epsilon),
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
    # Set up the cross-entropy loss function
loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=args.ignore_label)
    # Set up the evaluation metric
metric = paddle.metric.Accuracy()
    # Start training
do_train(args)
|
the-stack_0_12103 |
from typing import Union, Optional, Set
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.parameter import Parameter
from ..spaces import ValueSpace
from .base_module import FinegrainedModule
from .utils import sub_filter_start_end, is_searchable
__all__ = [
'BaseConvNd',
'Conv1d',
'Conv2d',
'Conv3d'
]
class BaseConvNd(_ConvNd, FinegrainedModule):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple, ValueSpace],
stride: Union[int, tuple, ValueSpace],
padding: Union[str, int, tuple, ValueSpace],
dilation: Union[int, tuple, ValueSpace],
groups: Union[int, tuple, ValueSpace],
bias: bool,
padding_mode: str = 'zeros',
auto_padding: bool = False,
*args,
**kwargs
):
'''Base Conv Module
Args:
            auto_padding: if set to True, a padding size is chosen automatically so that the
                output size matches the input size. For example, if kernel_size is 3, the
                padding size is 1; if kernel_size is (3, 7), the padding size is (1, 3).
'''
self.conv_dim = self.__class__.__name__[-2:]
# first initialize by FinegrainedModule
FinegrainedModule.__init__(self)
conv_kwargs = {
key: getattr(self, key, None) for key in [
'in_channels', 'out_channels', 'kernel_size',
'stride', 'padding', 'dilation', 'groups', 'bias'
]
}
self.init_ops(**conv_kwargs)
# then initialized by _ConvNd
_ConvNd.__init__(
self, self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding,
self.dilation, False, self.output_padding, self.groups, True, self.padding_mode)
if not bias:
del self.bias
self.register_parameter('bias', None)
self.is_search = self.isSearchConv()
def init_ops(self, *args, **kwargs):
'''Generate Conv operation'''
raise NotImplementedError
def isSearchConv(self):
'''Search flag
Supported arguments
- search_in_channel
- search_out_channel
- search_kernel_size
- search_stride
- search_dilation
- search_groups
'''
self.search_in_channel = False
self.search_out_channel = False
self.search_kernel_size = False
self.search_stride = False
self.search_dilation = False
self.search_groups = False
# self.search_bias = False
if all([not vs.is_search for vs in self.value_spaces.values()]):
return False
if is_searchable(getattr(self.value_spaces, 'in_channels', None)):
self.search_in_channel = True
if is_searchable(getattr(self.value_spaces, 'out_channels', None)):
self.search_out_channel = True
if is_searchable(getattr(self.value_spaces, 'kernel_size', None)):
kernel_candidates = self.value_spaces['kernel_size'].candidates
max_k = self.kernel_size
            # Todo: meant to be used together with `transform_kernel_size`; currently unused
# for i, k in enumerate(sorted(kernel_candidates)[:-1]):
# self.register_parameter(f'{max_k}to{k}_kernelMatrix', Parameter(torch.rand(max_k**2, k**2)))
self.search_kernel_size = True
if is_searchable(getattr(self.value_spaces, 'stride', None)):
self.search_stride = True
if is_searchable(getattr(self.value_spaces, 'dilation', None)):
self.search_dilation = True
if is_searchable(getattr(self.value_spaces, 'groups', None)):
self.search_groups = True
# if is_searchable(getattr(self.value_spaces, 'bias', None)):
# self.search_bias = True
return True
###########################################
# forward implementation
# - forward_conv
# - transform_kernel_size
###########################################
def forward(self, x):
out = None
if not self.is_search:
padding = self.padding
if self.auto_padding:
kernel_size = self.weight.shape[2:]
padding = []
for k in kernel_size:
padding.append(k//2)
out = self.conv(x, self.weight, self.bias, self.stride,
padding, self.dilation, self.groups)
else:
out = self.forward_conv(x)
return out
def forward_conv(self, x):
filters = self.weight.contiguous()
bias = self.bias
in_channels = self.in_channels
out_channels = self.out_channels
stride = self.value_spaces['stride'].value if self.search_stride else self.stride
groups = self.value_spaces['groups'].value if self.search_groups else self.groups
dilation = self.value_spaces['dilation'].value if self.search_dilation else self.dilation
padding = self.padding
if self.search_in_channel:
in_channels = self.value_spaces['in_channels'].value
filters = filters[:, :in_channels, ...]
if self.search_out_channel:
out_channels = self.value_spaces['out_channels'].value
if self.bias is not None:
bias = bias[:out_channels]
filters = filters[:out_channels, ...]
if self.search_kernel_size:
filters = self.transform_kernel_size(filters)
if self.search_groups:
filters = self.get_filters_by_groups(filters, in_channels, groups).contiguous()
if self.auto_padding:
kernel_size = filters.shape[2:]
padding = []
for k in kernel_size:
padding.append(k//2)
return self.conv(x, filters, bias, stride, padding, dilation, groups)
def get_filters_by_groups(self, filters, in_channels, groups):
'''Get filters when searching for #of groups'''
sub_filters = torch.chunk(filters, groups, dim=0)
sub_in_channels = in_channels // groups
sub_ratio = filters.size(1) // sub_in_channels
filter_crops = []
for i, sub_filter in enumerate(sub_filters):
part_id = i % sub_ratio
start = part_id * sub_in_channels
filter_crops.append(sub_filter[:, start:start + sub_in_channels, :, :])
filters = torch.cat(filter_crops, dim=0)
return filters
def transform_kernel_size(self, filters):
# Todo: support different types of kernel size transformation methods by `transform_kernel_size` function
sub_kernel_size = self.value_spaces['kernel_size'].value
start, end = sub_filter_start_end(self.kernel_size, sub_kernel_size)
if self.conv_dim=='1d': filters = filters[:, :, start:end]
if self.conv_dim=='2d': filters = filters[:, :, start:end, start:end]
if self.conv_dim=='3d': filters = filters[:, :, start:end, start:end, start:end]
return filters
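    # Worked example (assuming sub_filter_start_end follows the usual centred-crop convention,
    # i.e. start = (max_k - sub_k) // 2 and end = start + sub_k): with a 7x7 full kernel and a
    # 3x3 active sub-kernel, start=2 and end=5, so the centred 3x3 slice of each filter is used.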
def sort_weight_bias(self, module):
if self.search_in_channel:
vc = self.value_spaces['in_channels']
module.weight.data = torch.index_select(module.weight.data, 1, vc.sortIdx)
if self.search_out_channel:
vc = self.value_spaces['out_channels']
module.weight.data = torch.index_select(module.weight.data, 0, vc.sortIdx)
if self.bias is not None:
module.bias.data = torch.index_select(module.bias.data, 0, vc.sortIdx)
###########################################
# property
###########################################
@property
def params(self):
'''The number of the trainable parameters'''
# conv
weight = self.weight
bias = self.bias
if self.search_in_channel:
in_channels = self.value_spaces['in_channels'].value
weight = weight[:, :in_channels, ...]
if self.search_out_channel:
out_channels = self.value_spaces['out_channels'].value
weight = weight[:out_channels, :, ...]
if bias is not None: bias = bias[:out_channels]
if self.search_kernel_size:
kernel_size = self.value_spaces['kernel_size'].value
start, end = sub_filter_start_end(self.kernel_size, kernel_size)
shape_size = len(weight.shape)
if shape_size == 3:
# 1D conv
weight = weight[:, :, start:end]
elif shape_size == 4:
# 2D conv
weight = weight[:, :, start:end, start:end]
else:
# 3D conv
weight = weight[:, :, start:end, start:end, start:end]
parameters = [weight, bias]
params = sum([p.numel() for p in parameters if p is not None])
return params
class Conv1d(BaseConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[_size_1_t, ValueSpace],
stride: Union[_size_1_t, ValueSpace] = 1,
padding: Union[str, _size_1_t, ValueSpace] = 0,
dilation: Union[_size_1_t, ValueSpace] = 1,
groups: Union[int, ValueSpace] = 1,
bias: bool = True,
padding_mode: str = 'zeros',
auto_padding: bool = False
):
super(Conv1d, self).__init__(
in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
def init_ops(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: Union[str, _size_1_t] = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True
):
'''Generate Conv operation'''
self.kernel_size = _single(kernel_size)
self.stride = _single(stride)
self.padding = padding if isinstance(padding, str) else _single(padding)
self.dilation = _single(dilation)
self.output_padding = _single(0)
self.conv = F.conv1d
class Conv2d(BaseConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple],
stride: Union[int, tuple] = 1,
padding: Union[str, int, tuple] = 0,
dilation: Union[int, tuple] = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
auto_padding: bool = False
):
super(Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
def init_ops(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: Union[str, _size_2_t] = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
):
'''Generate Conv operation'''
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = padding if isinstance(padding, str) else _pair(padding)
self.dilation = _pair(dilation)
self.output_padding = _pair(0)
self.conv = F.conv2d
class Conv3d(BaseConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple],
stride: Union[int, tuple] = 1,
padding: Union[str, int, tuple] = 0,
dilation: Union[int, tuple] = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
auto_padding: bool = False
):
super(Conv3d, self).__init__(
in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
def init_ops(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: Union[str, _size_3_t] = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True
):
'''Generate Conv operation'''
self.kernel_size = _triple(kernel_size)
self.stride = _triple(stride)
self.padding = padding if isinstance(padding, str) else _triple(padding)
self.dilation = _triple(dilation)
self.output_padding = _triple(0)
self.conv = F.conv3d
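# Illustrative usage sketch (commented out; the ValueSpace constructor signature is an
# assumption inferred from how `candidates` and `value` are accessed above):
#
#   ks = ValueSpace([3, 5, 7])                      # searchable kernel size
#   conv = Conv2d(16, 32, kernel_size=ks, stride=1, padding=1)
#   y = conv(torch.rand(1, 16, 32, 32))             # forward_conv() crops the weights to the active kernel size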
|
the-stack_0_12104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Orbit description
"""
import numpy as np
from textwrap import indent
from ..constants import c
from ..dates import timedelta
from ..errors import OrbitError
from .forms import get_form, Form, _cache_param_names
from ..frames.frames import get_frame, orbit2frame
from .man import Man
from .cov import Cov
class StateVector(np.ndarray):
"""Coordinate representation"""
def __new__(cls, coord, date, form, frame, **kwargs):
"""
Args:
coord (list): 6-length state vector
date (Date): Date associated with the state vector
form (str or Form): Name of the form of the state vector
frame (str or Frame): Name of the frame of reference of the state vector
"""
if len(coord) != 6:
raise OrbitError("Should be 6 in length")
if isinstance(form, str):
form = get_form(form)
if isinstance(frame, str):
frame = get_frame(frame)
obj = np.ndarray.__new__(
cls, (6,), buffer=np.array([float(x) for x in coord]), dtype=float
)
kwargs["date"] = date
kwargs["form"] = form
kwargs["frame"] = frame
object.__setattr__(obj, "_data", kwargs)
return obj
def __array_finalize__(self, obj):
if obj is None:
return
object.__setattr__(self, "_data", obj._data.copy())
def __reduce__(self):
"""For pickling
see http://stackoverflow.com/questions/26598109
"""
reconstruct, clsinfo, state = super().__reduce__()
new_state = {
"basestate": state,
"data": self._data,
}
return reconstruct, clsinfo, new_state
def __setstate__(self, state):
"""For pickling
see http://stackoverflow.com/questions/26598109
"""
super().__setstate__(state["basestate"])
object.__setattr__(self, "_data", state["data"])
def copy(self, *, frame=None, form=None, same=None):
"""Provide a new object of the same point in space-time. Optionally,
allow for frame and form conversion
Keyword Args:
frame (str or Frame): Frame to convert the new instance into
form (str or Form): Form to convert the new instance into
same (StateVector): A statevector from which to copy the frame and form
Return:
StateVector :
If the argument *same* is used, it overwrites *frame* and *form*.
Example:
.. code-block:: python
# New instance of the same statevector
sv1 = sv.copy()
# statevector converted into spherical form
sv2 = sv.copy(form="spherical")
# statevector converted into EME2000 frame, keplerian form
sv3 = sv.copy(form="keplerian", frame="EME2000")
# statevector in the same frame and form as sv3 (EME2000, keplerian)
sv4 = sv.copy(same=sv3)
Override :py:meth:`numpy.ndarray.copy()` to include additional
fields
"""
new_compl = {}
for k, v in self._data.items():
new_compl[k] = v.copy() if hasattr(v, "copy") else v
new_obj = self.__class__(self.base, **new_compl)
if same is not None:
if hasattr(same, "frame") and hasattr(same, "form"):
frame = same.frame
form = same.form
else:
raise TypeError("'same' does not have a frame and/or a form attribute")
if frame and frame != self.frame:
new_obj.frame = frame
if form and form != self.form:
new_obj.form = form
return new_obj
def __getattr__(self, name):
name = Form.alt.get(name, name)
# Verification if the variable is available in the current form
if name in self.form.param_names:
i = self.form.param_names.index(name)
res = self[i]
elif name in _cache_param_names:
# The attribute we are trying to access is used in another
            # form of StateVector than the one currently used by the object
raise AttributeError(f"'{name}' is not available in '{self.form}' form")
elif name in self._data.keys():
res = self._data[name]
else:
raise AttributeError(f"'{self.__class__}' object has no attribute {name!r}")
return res
def __setattr__(self, name, value):
propobj = getattr(self.__class__, name, None)
if isinstance(propobj, property):
# If the attribute we are trying to set is a property
# (i.e. getter and setters functions defined below)
# we have to call it, in lieu of setting the value directly
# in the __setattr__ method.
if propobj.fset is None: # pragma: no cover
raise AttributeError("can't set attribute")
else:
return propobj.fset(self, value)
else:
name = Form.alt.get(name, name)
# Verification if the variable is available in the current form
if name in self.form.param_names:
i = self.form.param_names.index(name)
self[i] = value
elif name in _cache_param_names:
# The name of the attribute we are trying to set is used in
                # another form of StateVector than the one currently used by
# the object
raise AttributeError(f"'{name}' is not available in '{self.form}' form")
else:
self._data[name] = value
def __getitem__(self, key):
if isinstance(key, (int, slice)):
return super().__getitem__(key)
else:
try:
return self.__getattr__(key)
except AttributeError as err:
raise KeyError(str(err))
def __setitem__(self, key, value):
if isinstance(key, (int, slice)):
super().__setitem__(key, value)
else:
try:
self.__setattr__(key, value)
except AttributeError as err:
raise KeyError(str(err))
def __str__(self): # pragma: no cover
return str(self.base)
def __repr__(self): # pragma: no cover
coord_str = "\n".join(
[
" %s = %s" % (name, arg)
for name, arg in zip(self.form.param_names, self)
]
)
fmt = f"""
StateVector =
date = {self.date}
form = {self.form}
frame = {self.frame}
coord =
{coord_str}
"""
# Add covariance to the repr
if self.cov is not None:
fmt += indent(repr(self.cov), " " * 2)
# Add man to the repr if there is some
if self.maneuvers:
fmt += " maneuvers =\n"
for man in self.maneuvers:
fmt += indent(repr(man), " " * 4)
return fmt
@property
def date(self):
return self._data["date"]
@date.setter
def date(self, value):
self._data["date"] = value
@property
def event(self):
return self._data.get("event")
@event.setter
def event(self, value):
self._data["event"] = value
@property
def cov(self):
""":py:class:`~beyond.orbits.cov.Cov`: 6x6 Covariance matrix
If a statevector and its covariance are expressed in the same frame,
changing the frame of the statevector will trigger the change of its
covariance frame.
"""
if "cov" not in self._data.keys():
self._data["cov"] = None
return self._data["cov"]
@cov.setter
def cov(self, value):
if not isinstance(value, Cov):
raise TypeError(f"Unknwon covariance type : {type(value)}")
self._data["cov"] = value
self._data["cov"].orb = self
@cov.deleter
def cov(self):
self._data["cov"] = None
@property
def maneuvers(self):
"""list of :py:class:`~beyond.orbits.man.Man`: Maneuver descriptions usable by the
propagator. Not all propagators can handle maneuvers. Check their respective documentations
for more details.
"""
mans = self._data.setdefault("maneuvers", [])
if isinstance(mans, Man):
mans = [mans]
self._data["maneuvers"] = mans
return mans
@maneuvers.setter
def maneuvers(self, mans):
if isinstance(mans, Man):
mans = [mans]
self._data["maneuvers"] = mans
@maneuvers.deleter
def maneuvers(self):
del self._data["maneuvers"]
@property
def form(self):
""":py:class:`~beyond.orbits.forms.Form`: Form of the coordinates of the orbit
If set as a string (e.g. ``"cartesian"``) will be automatically converted to the
corresponding Form object.
.. code-block:: python
orbit.form = "cartesian"
# is equivalent to
from beyond.orbits.forms import CART
orbit.form = CART
"""
return self._data["form"]
@form.setter
def form(self, new_form):
if isinstance(new_form, str):
new_form = get_form(new_form)
self.base.setfield(self._data["form"](self, new_form), dtype=float)
self._data["form"] = new_form
@property
def frame(self):
""":py:class:`~beyond.frames.frames.Frame`: Reference frame of the orbit
If set as a string (e.g. ``"EME2000"``) will be automatically converted to the
corresponding Frame object.
.. code-block:: python
orbit.frame = "EME2000"
# is equivalent to
from beyond.frames.frames import EME2000
orbit.frame = EME2000
"""
return self._data["frame"]
@frame.setter
def frame(self, new_frame):
old_form = self.form
old_frame = self.frame
if isinstance(new_frame, str):
new_frame = get_frame(new_frame)
if new_frame != self.frame:
self.form = "cartesian"
try:
new_coord = self.frame.transform(self, new_frame)
self.base.setfield(new_coord, dtype=float)
self._data["frame"] = new_frame
finally:
self.form = old_form
if self.cov is not None and self.cov.frame == old_frame:
self.cov.frame = new_frame
def as_frame(self, name, **kwargs): # pragma: no cover
"""Register the orbit as frame.
see :py:func:`beyond.frames.frames.orbit2frame` for details of the arguments
"""
return orbit2frame(name, self, **kwargs)
def as_orbit(self, propagator):
"""Attach a propagator to a StateVector, creating a new Orbit object
Args:
            propagator (~beyond.propagator.base.Propagator) :
Return:
Orbit : New Orbit object, with the same state as the creating StateVector
"""
from .orbit import Orbit
new_dict = self._data.copy()
new_dict["propagator"] = propagator
return Orbit(self.base, **new_dict)
@property
def infos(self):
""":py:class:`Infos` object of ``self``"""
if not hasattr(self, "_infos"):
self._data["infos"] = Infos(self)
return self._data["infos"]
class Infos:
"""Compute additional informations on an orbit"""
def __init__(self, orb):
self.orb = orb
@property
def kep(self):
if not hasattr(self, "_kep"):
self._kep = self.orb.copy(form="keplerian")
return self._kep
@property
def sphe(self):
if not hasattr(self, "_sphe"):
self._sphe = self.orb.copy(form="spherical")
return self._sphe
@property
def mu(self):
return self.orb.frame.center.body.mu
@property
def type(self):
for t in "elliptic hyperbolic parabolic".split():
if getattr(self, t):
return t
@property
def elliptic(self):
"""True if the orbit it elliptic"""
return self.kep.e < 1
@property
def parabolic(self):
"""True if the orbit it parabolic"""
return self.kep.e == 1
@property
def hyperbolic(self):
"""True if the orbit it hyperbolic"""
return self.kep.e > 1
@property
def energy(self):
"""Mechanical energy of the orbit"""
return -self.mu / (2 * self.kep.a)
@property
def n(self):
"""Mean motion"""
return np.sqrt(self.mu / abs(self.kep.a) ** 3)
@property
def period(self):
"""Period of the orbit as a timedelta"""
if not self.elliptic:
raise ValueError("period undefined : orbit is hyperbolic")
return timedelta(seconds=2 * np.pi / self.n)
@property
def apocenter(self):
"""Radius of the apocenter"""
if not self.elliptic:
raise ValueError("apocenter undefined : orbit is hyperbolic")
return self.kep.a * (1 + self.kep.e)
@property
def pericenter(self):
"""Radius of the pericenter"""
return self.kep.a * (1 - self.kep.e)
@property
def r(self):
"""Instantaneous radius"""
return self.sphe.r
@property
def ra(self):
"""Radius of the apocenter"""
return self.apocenter
@property
def rp(self):
"""Radius of the pericenter"""
return self.pericenter
@property
def zp(self):
"""Altitude of the pericenter, relative to the body equatorial surface"""
return self.rp - self.orb.frame.center.body.r
@property
def za(self):
"""Altitude of the apocenter, relative to the body equatorial surface"""
return self.ra - self.orb.frame.center.body.r
@property
def v(self):
"""Instantaneous velocity"""
return np.sqrt(self.mu * (2 / self.r - 1 / self.kep.a))
@property
def va(self):
"""Velocity at apocenter"""
if not self.elliptic:
raise ValueError("va undefined : orbit not elliptic")
return np.sqrt(self.mu * (2 / (self.ra) - 1 / self.kep.a))
@property
def vp(self):
"""Velocity at pericenter"""
return np.sqrt(self.mu * (2 / (self.rp) - 1 / self.kep.a))
@property
def vinf(self):
"""Hyperbolic excess velocity"""
if not self.hyperbolic:
raise ValueError("vinf undefined : orbit not hyperbolic")
return np.sqrt(self.mu / abs(self.kep.a))
@property
def dinf(self):
"""Distance between the focus and the asymptote"""
if not self.hyperbolic:
raise ValueError("dinf undefined : orbit not hyperbolic")
return abs(self.kep.a * self.kep.e) * np.sqrt(1 - (1 / self.kep.e) ** 2)
@property
def cos_fpa(self):
return (
np.sqrt(self.mu / (self.kep.a * (1 - self.kep.e ** 2)))
* (1 + self.kep.e * np.cos(self.kep.nu))
            / self.v
)
@property
def sin_fpa(self):
return (
np.sqrt(self.mu / (self.kep.a * (1 - self.kep.e ** 2)))
* self.kep.e
* np.sin(self.kep.nu)
            / self.v
)
@property
def fpa(self):
"""Flight path angle"""
return np.arctan2(self.sin_fpa, self.cos_fpa)
@property
def delay(self):
""":py:class:`~datetime.timedelta`: Light propagation delay from the point
in space described by ``self`` to the center of the reference frame
"""
return timedelta(seconds=self.sphe.r / c)
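# Quick reference sketch (assuming `sv` is a StateVector built elsewhere):
#   sv.infos.kep      -> keplerian copy of the state vector (computed lazily, then cached)
#   sv.infos.period   -> orbital period as a timedelta (raises ValueError for non-elliptic orbits)
#   sv.infos.fpa      -> flight path angle, from arctan2(sin_fpa, cos_fpa)
#   sv.infos.delay    -> light propagation delay to the frame centre, as a timedelta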
|
the-stack_0_12105 | from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import numpy as np
from numpy.random.mtrand import RandomState
import subprocess
from multiprocessing import pool
import pandas as pd
def get_mnms_data(data_root):
files_raw = []
files_gt = []
for r, dirs, files in os.walk(data_root):
for f in files:
if f.endswith('nii.gz'):
file_path = os.path.join(r, f)
if '_gt' in f:
files_gt.append(file_path)
else:
files_raw.append(file_path)
return files_raw, files_gt
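# Expected layout sketch (illustrative; exact file names vary): every *.nii.gz found under
# data_root is collected, and files whose name contains '_gt' are treated as ground-truth
# label volumes while all others are treated as raw image volumes, e.g.
#   <data_root>/<patient>/<patient>_sa.nii.gz      -> files_raw
#   <data_root>/<patient>/<patient>_sa_gt.nii.gz   -> files_gt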
def generate_filename_for_nnunet(pat_id, ts, pat_folder=None, add_zeros=False, vendor=None, centre=None, mode='mnms',
data_format='nii.gz'):
if not vendor or not centre:
if add_zeros:
filename = "{}_{}_0000.{}".format(pat_id, str(ts).zfill(4), data_format)
else:
filename = "{}_{}.{}".format(pat_id, str(ts).zfill(4), data_format)
else:
if mode == 'mnms':
if add_zeros:
filename = "{}_{}_{}_{}_0000.{}".format(pat_id, str(ts).zfill(4), vendor, centre, data_format)
else:
filename = "{}_{}_{}_{}.{}".format(pat_id, str(ts).zfill(4), vendor, centre, data_format)
else:
if add_zeros:
filename = "{}_{}_{}_{}_0000.{}".format(vendor, centre, pat_id, str(ts).zfill(4), data_format)
else:
filename = "{}_{}_{}_{}.{}".format(vendor, centre, pat_id, str(ts).zfill(4), data_format)
if pat_folder:
filename = os.path.join(pat_folder, filename)
return filename
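# Worked example: generate_filename_for_nnunet(pat_id='A0S9V9', ts=10, vendor='A', centre=1,
# add_zeros=True, mode='mnms') returns 'A0S9V9_0010_A_1_0000.nii.gz' (the patient id is an
# illustrative value); without vendor/centre it falls back to 'A0S9V9_0010_0000.nii.gz'.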
def select_annotated_frames_mms(data_folder, out_folder, add_zeros=False, mode='mnms', df_path="/media/full/tera2/data/challenges/mms/Training-corrected_original/M&Ms Dataset Information.xlsx"):
table = pd.read_excel(df_path, index_col='External code')
for idx in table.index:
ed = table.loc[idx, 'ED']
es = table.loc[idx, 'ES']
vendor = table.loc[idx, 'Vendor']
centre = table.loc[idx, 'Centre']
if vendor != "C":
# generate old filename (w/o vendor and centre)
filename_ed_original = generate_filename_for_nnunet(pat_id=idx, ts=ed, pat_folder=data_folder,
vendor=None, centre=None, add_zeros=False)
filename_es_original = generate_filename_for_nnunet(pat_id=idx, ts=es, pat_folder=data_folder,
vendor=None, centre=None, add_zeros=False)
# generate new filename with vendor and centre
filename_ed = generate_filename_for_nnunet(pat_id=idx, ts=ed, pat_folder=out_folder,
vendor=vendor, centre=centre, add_zeros=add_zeros, mode=mode)
filename_es = generate_filename_for_nnunet(pat_id=idx, ts=es, pat_folder=out_folder,
vendor=vendor, centre=centre, add_zeros=add_zeros, mode=mode)
shutil.copy(filename_ed_original, filename_ed)
shutil.copy(filename_es_original, filename_es)
def create_custom_splits_for_experiments(task_path):
data_keys = [i[:-4] for i in
subfiles(os.path.join(task_path, "nnUNetData_plans_v2.1_2D_stage0"),
join=False, suffix='npz')]
existing_splits = os.path.join(task_path, "splits_final.pkl")
splits = load_pickle(existing_splits)
splits = splits[:5] # discard old changes
unique_a_only = np.unique([i.split('_')[0] for i in data_keys if i.find('_A_') != -1])
unique_b_only = np.unique([i.split('_')[0] for i in data_keys if i.find('_B_') != -1])
num_train_a = int(np.round(0.8 * len(unique_a_only)))
num_train_b = int(np.round(0.8 * len(unique_b_only)))
p = RandomState(1234)
idx_a_train = p.choice(len(unique_a_only), num_train_a, replace=False)
idx_b_train = p.choice(len(unique_b_only), num_train_b, replace=False)
identifiers_a_train = [unique_a_only[i] for i in idx_a_train]
identifiers_b_train = [unique_b_only[i] for i in idx_b_train]
identifiers_a_val = [i for i in unique_a_only if i not in identifiers_a_train]
identifiers_b_val = [i for i in unique_b_only if i not in identifiers_b_train]
    # fold 5 will be trained on A and evaluated on the validation sets of A and B
splits.append({'train': [i for i in data_keys if i.split("_")[0] in identifiers_a_train],
'val': [i for i in data_keys if i.split("_")[0] in identifiers_a_val] + [i for i in data_keys if
i.split("_")[
0] in identifiers_b_val]})
    # fold 6 will be trained on B and evaluated on the validation sets of A and B
splits.append({'train': [i for i in data_keys if i.split("_")[0] in identifiers_b_train],
'val': [i for i in data_keys if i.split("_")[0] in identifiers_a_val] + [i for i in data_keys if
i.split("_")[
0] in identifiers_b_val]})
    # fold 7 will be trained on both and evaluated on both
splits.append({'train': [i for i in data_keys if i.split("_")[0] in identifiers_b_train] + [i for i in data_keys if i.split("_")[0] in identifiers_a_train],
'val': [i for i in data_keys if i.split("_")[0] in identifiers_a_val] + [i for i in data_keys if
i.split("_")[
0] in identifiers_b_val]})
save_pickle(splits, existing_splits)
def split_4d_nii(nii_path, split_folder, pat_name=None, add_zeros=False):
# create temporary folder in which the 3d+t file will be split into many 3d files
temp_base = os.path.dirname(nii_path)
temp_location = os.path.join(temp_base, 'tmp')
if not os.path.isdir(temp_location):
os.mkdir(temp_location)
os.chdir(temp_location)
if not os.path.isdir(split_folder):
os.mkdir(split_folder)
_ = subprocess.call(['fslsplit', nii_path])
# rename files so that the patient's ID is in the filename
file_list = [f for f in os.listdir(temp_location) if os.path.isfile(f)]
file_list = sorted(file_list)
if not pat_name:
pat_name = os.path.basename(os.path.dirname(nii_path))
for ts, temp_file in enumerate(file_list):
# get time
time_step = temp_file.split('.')[0][3:]
        # make sure the time step is a number; otherwise fall back to the enumeration index
        # (the files have already been sorted, so Python's sort order is trusted)
        try:
            int(time_step)
        except ValueError:
            time_step = ts
# change filename AND location -> move files
if add_zeros:
new_file_name = '{}_{}_0000.nii.gz'.format(pat_name, time_step)
else:
new_file_name = '{}_{}.nii.gz'.format(pat_name, time_step)
os.rename(os.path.join(temp_location, temp_file),
os.path.join(split_folder, new_file_name))
os.rmdir(temp_location)
def split_4d_parallel(args):
nii_path, split_folder, pat_name = args
split_4d_nii(nii_path, split_folder, pat_name)
def split_4d_for_all_pat(files_paths, split_folder):
p = pool.Pool(8)
p.map(split_4d_parallel,
zip(files_paths, [split_folder] * len(files_paths), [None] * len(files_paths)))
if __name__ == "__main__":
task_name = "Task114_heart_MNMs"
train_dir = "/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/nnUnet_raw/nnUNet_raw_data/{}/imagesTr".format(task_name)
test_dir = "/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/nnUnet_raw/nnUNet_raw_data/{}/imagesTs".format(task_name)
#out_dir='/media/full/tera2/output_nnUNet/preprocessed_data/Task114_heart_mnms'
out_dir='/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/tmp'
# train
all_train_files = [os.path.join(train_dir, x) for x in os.listdir(train_dir)]
# test
all_test_files = [os.path.join(test_dir, x) for x in os.listdir(test_dir)]
data_root = '/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/data/challenges/mms/Training-corrected_original/Labeled'
files_raw, files_gt = get_mnms_data(data_root=data_root)
split_path_raw ='/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/data/challenges/mms/temp_split_raw'
split_path_gt ='/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/data/challenges/mms/temp_split_gt'
maybe_mkdir_p(split_path_raw)
maybe_mkdir_p(split_path_gt)
split_4d_for_all_pat(files_raw, split_path_raw)
split_4d_for_all_pat(files_gt, split_path_gt)
out_dir = '/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/nnUnet_raw/nnUNet_raw_data/{}/'.format(task_name)
maybe_mkdir_p(join(out_dir, "imagesTr"))
maybe_mkdir_p(join(out_dir, "imagesTs"))
maybe_mkdir_p(join(out_dir, "labelsTr"))
imagesTr_path = os.path.join(out_dir, "imagesTr")
labelsTr_path = os.path.join(out_dir, "labelsTr")
select_annotated_frames_mms(split_path_raw, imagesTr_path, add_zeros=True)
select_annotated_frames_mms(split_path_gt, labelsTr_path, add_zeros=False)
labelsTr = subfiles(labelsTr_path)
json_dict = OrderedDict()
json_dict['name'] = "M&Ms"
json_dict['description'] = "short axis cardiac cine MRI segmentation"
json_dict['tensorImageSize'] = "4D"
json_dict['reference'] = "Campello, Víctor M. et al.: Multi-Centre, Multi-Vendor & Multi-Disease Cardiac Image Segmentation. In preparation."
json_dict['licence'] = "see M&Ms challenge"
json_dict['release'] = "0.0"
json_dict['modality'] = {
"0": "MRI",
}
# labels differ for ACDC challenge
json_dict['labels'] = {
"0": "background",
"1": "LVBP",
"2": "LVM",
"3": "RV"
}
json_dict['numTraining'] = len(labelsTr)
json_dict['numTest'] = 0
json_dict['training'] = [{'image': "./imagesTr/%s" % i.split("/")[-1], "label": "./labelsTr/%s" % i.split("/")[-1]} for i in
labelsTr]
json_dict['test'] = []
save_json(json_dict, os.path.join(out_dir, "dataset.json"))
# then preprocess data and plan training.
# run in terminal
# > nnUNet_plan_and_preprocess -t <TaskID> --verify_dataset_integrity
# start training and stop it immediately to get a split.pkl file
# > nnUNet_train 2d nnUNetTrainerV2_MMS <TaskID> 0
#
# then create custom splits as used for the final M&Ms submission
#
split_file_path = '/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/output_nnUNet/preprocessed_data/{}/'.format(task_name)
create_custom_splits_for_experiments(split_file_path)
|
the-stack_0_12107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/1/9 15:02
@Author : Zhangyu
@Email : [email protected]
@File : config.py
@Software: PyCharm
@Github : zhangyuo
"""
# train data
TRAIN_DATA_PATH_NEG = '../data/rt-polaritydata/rt-polarity.neg'
TRAIN_DATA_PATH_POS = '../data/rt-polaritydata/rt-polarity.pos'
# test data
TEST_DATA_PATH_NEG = '../data/test_data/rt-polarity.neg'
TEST_DATA_PATH_POS = '../data/test_data/rt-polarity.pos'
# model Hyperparameters
# percentage of the training data to use for validation
dev_sample_percentage = 0.1
# dropout keep_prob
dropout = 0.5
# word embedding dim
embedding_dim = 256
# comma-separated filter sizes
filter_sizes = 3, 4, 5
# number of filters per filter size
num_filters = 128
# l2 regularization lambda
l2_reg_lambda = 0.5
# Adam/Adadelta/Adagrad/RMSProp/Momentum/SGD
optimizer = "Adam"
# learning rate
lr = 1e-3
# gradient clipping
grad_clip = 5.0
# training parameters
# number of checkpoints to store
num_checkpoints = 5
# batch Size
batch_size = 128
# number of training epochs
num_epochs = 200
# evaluate model on dev set after this many steps
evaluate_every = 100
# save model after this many steps
checkpoint_every = 100
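# Usage sketch (assuming this module is imported as `config` by the training/eval scripts,
# and that `TextCNN` is the model class they define elsewhere):
#   import config
#   model = TextCNN(embedding_dim=config.embedding_dim,
#                   filter_sizes=config.filter_sizes,
#                   num_filters=config.num_filters,
#                   l2_reg_lambda=config.l2_reg_lambda)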
|
the-stack_0_12110 | # -*- coding: utf-8 -*-
"""Tests for binary data format and file."""
import io
import unittest
from dtfabric import errors as dtfabric_errors
from dtfabric.runtime import data_maps as dtfabric_data_maps
from dtfabric.runtime import fabric as dtfabric_fabric
from winregrc import data_format
from winregrc import errors
from tests import test_lib
class ErrorBytesIO(io.BytesIO):
"""Bytes IO that errors."""
# The following methods are part of the file-like object interface.
# pylint: disable=invalid-name
def read(self, size=None): # pylint: disable=redundant-returns-doc,unused-argument
"""Reads bytes.
Args:
size (Optional[int]): number of bytes to read, where None represents
all remaining bytes.
Returns:
bytes: bytes read.
Raises:
IOError: for testing.
OSError: for testing.
"""
raise IOError('Unable to read for testing purposes.')
class ErrorDataTypeMap(dtfabric_data_maps.DataTypeMap):
"""Data type map that errors."""
# pylint: disable=redundant-returns-doc
def FoldByteStream(self, mapped_value, **unused_kwargs):
"""Folds the data type into a byte stream.
Args:
mapped_value (object): mapped value.
Returns:
bytes: byte stream.
Raises:
FoldingError: if the data type definition cannot be folded into
the byte stream.
"""
raise dtfabric_errors.FoldingError(
'Unable to fold to byte stream for testing purposes.')
def MapByteStream(self, byte_stream, **unused_kwargs):
"""Maps the data type on a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
object: mapped value.
Raises:
dtfabric.MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
raise dtfabric_errors.MappingError(
'Unable to map byte stream for testing purposes.')
class BinaryDataFormatTest(test_lib.BaseTestCase):
"""Binary data format tests."""
# pylint: disable=protected-access
_DATA_TYPE_FABRIC_DEFINITION = b"""\
name: uint32
type: integer
attributes:
format: unsigned
size: 4
units: bytes
---
name: point3d
type: structure
attributes:
byte_order: little-endian
members:
- name: x
data_type: uint32
- name: y
data_type: uint32
- name: z
data_type: uint32
---
name: shape3d
type: structure
attributes:
byte_order: little-endian
members:
- name: number_of_points
data_type: uint32
- name: points
type: sequence
element_data_type: point3d
number_of_elements: shape3d.number_of_points
"""
_DATA_TYPE_FABRIC = dtfabric_fabric.DataTypeFabric(
yaml_definition=_DATA_TYPE_FABRIC_DEFINITION)
_POINT3D = _DATA_TYPE_FABRIC.CreateDataTypeMap('point3d')
_POINT3D_SIZE = _POINT3D.GetByteSize()
_SHAPE3D = _DATA_TYPE_FABRIC.CreateDataTypeMap('shape3d')
def testDebugPrintData(self):
"""Tests the _DebugPrintData function."""
output_writer = test_lib.TestOutputWriter()
test_format = data_format.BinaryDataFormat(
output_writer=output_writer)
data = b'\x00\x01\x02\x03\x04\x05\x06'
test_format._DebugPrintData('Description', data)
expected_output = [
'Description:\n',
('0x00000000 00 01 02 03 04 05 06 '
'.......\n\n')]
self.assertEqual(output_writer.output, expected_output)
def testDebugPrintDecimalValue(self):
"""Tests the _DebugPrintDecimalValue function."""
output_writer = test_lib.TestOutputWriter()
test_format = data_format.BinaryDataFormat(
output_writer=output_writer)
test_format._DebugPrintDecimalValue('Description', 1)
expected_output = ['Description\t\t\t\t\t\t\t\t: 1\n']
self.assertEqual(output_writer.output, expected_output)
# TODO add tests for _DebugPrintFiletimeValue
def testDebugPrintValue(self):
"""Tests the _DebugPrintValue function."""
output_writer = test_lib.TestOutputWriter()
test_format = data_format.BinaryDataFormat(
output_writer=output_writer)
test_format._DebugPrintValue('Description', 'Value')
expected_output = ['Description\t\t\t\t\t\t\t\t: Value\n']
self.assertEqual(output_writer.output, expected_output)
def testDebugPrintText(self):
"""Tests the _DebugPrintText function."""
output_writer = test_lib.TestOutputWriter()
test_format = data_format.BinaryDataFormat(
output_writer=output_writer)
test_format._DebugPrintText('Text')
expected_output = ['Text']
self.assertEqual(output_writer.output, expected_output)
def testFormatDataInHexadecimal(self):
"""Tests the _FormatDataInHexadecimal function."""
test_format = data_format.BinaryDataFormat()
data = b'\x00\x01\x02\x03\x04\x05\x06'
expected_formatted_data = (
'0x00000000 00 01 02 03 04 05 06 '
'.......\n'
'\n')
formatted_data = test_format._FormatDataInHexadecimal(data)
self.assertEqual(formatted_data, expected_formatted_data)
data = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09'
expected_formatted_data = (
'0x00000000 00 01 02 03 04 05 06 07 08 09 '
'..........\n'
'\n')
formatted_data = test_format._FormatDataInHexadecimal(data)
self.assertEqual(formatted_data, expected_formatted_data)
data = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
expected_formatted_data = (
'0x00000000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f '
'................\n'
'\n')
formatted_data = test_format._FormatDataInHexadecimal(data)
self.assertEqual(formatted_data, expected_formatted_data)
data = (
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
expected_formatted_data = (
'0x00000000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f '
'................\n'
'...\n'
'0x00000020 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f '
'................\n'
'\n')
formatted_data = test_format._FormatDataInHexadecimal(data)
self.assertEqual(formatted_data, expected_formatted_data)
# TODO: add tests for _GetDataTypeMap
# TODO: add tests for _ReadDefinitionFile
def testReadStructureFromByteStream(self):
"""Tests the _ReadStructureFromByteStream function."""
output_writer = test_lib.TestOutputWriter()
test_format = data_format.BinaryDataFormat(
debug=True, output_writer=output_writer)
test_format._ReadStructureFromByteStream(
b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00', 0,
self._POINT3D, 'point3d')
# Test with missing byte stream.
with self.assertRaises(ValueError):
test_format._ReadStructureFromByteStream(
None, 0, self._POINT3D, 'point3d')
# Test with missing data map type.
with self.assertRaises(ValueError):
test_format._ReadStructureFromByteStream(
b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00', 0, None,
'point3d')
# Test with data type map that raises an dtfabric.MappingError.
data_type_map = ErrorDataTypeMap(None)
with self.assertRaises(errors.ParseError):
test_format._ReadStructureFromByteStream(
b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00', 0,
data_type_map, 'point3d')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12111 | #!/usr/bin/env python
#
# Copyright 2010-2011 The Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Author: Rares Vernica <rares (at) ics.uci.edu>
import sys
import pprint
from plotutils import *
from pychart import *
def rel(x):
global dat
y = []
y = [x[0]]
for i in xrange(1, len(x)):
y.append(x[i] / dat[0][i])
return y
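# Worked example: with dat[0] == [2, 10.0] (the baseline row), rel([8, 25.0]) returns
# [8, 2.5] -- the x value is kept and every other column is expressed relative to the
# first (baseline) row, which is what the "Relative Scaleup" axis plots.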
(argv, p) = parse_arg(sys.argv)
if p['y_min'] is None:
p['y_min'] = 1
if p['y_max'] is None:
p['y_max'] = 5
###
### Data
###
pp = pprint.PrettyPrinter()
data = []
for d in p['dirs']:
dat = chart_data.read_csv(d + p['path'], p['format'])
dat = dat[p['start']:]
dat = chart_data.transform(rel, dat)
pp.pprint(dat)
data.append(dat)
###
### Plot
###
p['loc_area'] = (0, 0)
p['loc_legend'] = (10, 75)
plot_init(theme, p)
theme.reinitialize()
p['fname_out'] = 'rel-' + p['fname_out']
can = canvas.init(p['fname_out'])
xaxis = axis.X(
label='# Nodes and Dataset Size\n(times 2.5 x original)\n' + p['note'], format = '%d')
yaxis = axis.Y(label='Relative Scaleup', format = '%.0f')
legend = legend.T(loc = p['loc_legend'])
data_max_x = data[0][-1][0]
ar = area.T(
# size = (120, (p['y_max'] - p['y_min']) / 4 * 120),
loc = p['loc_area'],
x_axis = xaxis,
x_range = (2, data_max_x),
y_axis = yaxis,
y_range = (p['y_min'], p['y_max']),
y_grid_interval = p['y_grid_interval'],
legend = p['legend_instance'])
tick_marks = [tick_mark.star, tick_mark.blacksquare, tick_mark.tri, tick_mark.circle3, tick_mark.gray70dtri, ]
for i in xrange(len(data)):
for j in xrange(len(p['labels'])):
if p['mask'] and p['mask'][j] == '0':
continue
k = j
ycol = j + 1
if len(p['dirs']) > 1:
k = i
ycol = 1
ar.add_plot(
line_plot.T(
label = p['labels'][k],
data = data[i],
ycol = ycol,
tick_mark=tick_marks[k]))
if len(p['dirs']) > 1:
break
data_ideal = [[x[0], 1] for x in data[0]]
ar.add_plot(
line_plot.T(
label = 'Ideal',
data = data_ideal,
tick_mark=None,
line_style = line_style.T(width = .2)))
ar.draw()
can.close()
print_filename(p['fname_out'])
|
the-stack_0_12113 | """Process the raw EmoV-DB dataset.
This assumes the file structure from the original sorted data:
/.../
bea/
Angry/
*.wav
Amused/
*.wav
...
josh/
Angry/
*.wav
...
...
"""
from pathlib import Path
import click
from ertk.dataset import resample_rename_clips, write_annotations, write_filelist
from ertk.utils import PathlibPath
emotion_map = {
"Amused": "amusement",
"Angry": "anger",
"Disgusted": "disgust",
"Neutral": "neutral",
"Sleepy": "sleepiness",
}
gender_map = {"bea": "F", "jenie": "F", "josh": "M", "sam": "M"}
@click.command()
@click.argument("input_dir", type=PathlibPath(exists=True, file_okay=False))
@click.option("--resample/--noresample", default=True)
def main(input_dir: Path, resample: bool):
"""Process the EmoV-DB dataset at location INPUT_DIR and resample
audio to 16 kHz 16-bit WAV audio.
"""
paths = list(input_dir.glob("**/*.wav"))
resample_dir = Path("resampled")
mapping = {x: resample_dir / f"{x.parts[-3]}_{x.stem.lower()}.wav" for x in paths}
if resample:
resample_rename_clips(mapping=mapping)
resampled_paths = list(resample_dir.glob("*.wav"))
write_filelist(resampled_paths, "files_all")
write_annotations(
{mapping[p].stem: emotion_map[p.parts[-2]] for p in paths}, "label"
)
write_annotations({mapping[p].stem: p.parts[-3] for p in paths}, "speaker")
write_annotations(
{mapping[p].stem: gender_map[p.parts[-3]] for p in paths}, "gender"
)
write_annotations({mapping[p].stem: p.stem[-4:] for p in paths}, "sentence")
write_annotations({mapping[p].stem: "us" for p in paths}, "country")
write_annotations({mapping[p].stem: "en" for p in paths}, "language")
if __name__ == "__main__":
main()
|
the-stack_0_12114 | import unittest
from easy_music_generator.preprocessor import preprocessor as p
class TestPreprocessor(unittest.TestCase):
def test_parse_scores(self):
'''
Test that parse_scores() returns a score in the form of a list.
Not testing for actual score accuracy as it changes every run.
'''
preprocessor_obj = p.Preprocessor()
score = preprocessor_obj.parse_scores(
"./easy_music_generator/music/test_parse_scores/")
self.assertEqual("<class 'list'>", str(type(score)))
def test_parse_scores_no_files_found_exception(self):
'''
Test that parse_scores() raises an exception when the filepath
parameter does not contain any files.
'''
filepath = "./music/test_no_files_exception/"
preprocessor_obj = p.Preprocessor()
with self.assertRaises(p.NoFilesFoundException) as context:
preprocessor_obj.parse_scores(filepath)
self.assertEqual(
context.exception.message,
"No MIDI or MusicXML files found in the provided directory. Please check the path.")
|
the-stack_0_12116 | #MenuTitle: Find and Replace in Layer Names
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Replaces strings in layer names of all selected glyphs. Useful for adjusting layers for the bracket trick: http://glyphsapp.com/blog/alternating-glyph-shapes/
"""
import vanilla
class replaceInLayerNames(object):
def __init__(self):
# Window 'self.w':
windowWidth = 200
windowHeight = 130
windowWidthResize = 300 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Find and Replace in Layer Names", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.FindAndReplaceInLayerNames.mainwindow" # stores last window position and size
)
self.w.textSearch = vanilla.TextBox((15, 15, 67, 14), "Search for:", sizeStyle='small')
self.w.searchFor = vanilla.EditText((15+67, 12, -15, 19), "[130]", sizeStyle='small', callback=self.SavePreferences)
self.w.textReplace = vanilla.TextBox((15, 15+22, 67, 14), "Replace by:", sizeStyle='small')
self.w.replaceBy = vanilla.EditText((15+67, 12+22, -15, 19), "[150]", sizeStyle='small', callback=self.SavePreferences)
self.w.allGlyphs = vanilla.CheckBox( (15, 15+44, -15, 20), "Include all glyphs in font", value=True, callback=self.SavePreferences, sizeStyle='small' )
self.w.replaceButton = vanilla.Button((-80, -40, -15, -5), "Replace", sizeStyle='small', callback=self.buttonCallback)
self.LoadPreferences()
self.w.setDefaultButton( self.w.replaceButton )
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.searchFor"] = self.w.searchFor.get()
Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.replaceBy"] = self.w.replaceBy.get()
Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.allGlyphs"] = self.w.allGlyphs.get()
except:
return False
return True
def LoadPreferences( self ):
try:
Glyphs.registerDefault("com.mekkablue.FindAndReplaceInLayerNames.searchFor", "[10]")
Glyphs.registerDefault("com.mekkablue.FindAndReplaceInLayerNames.replaceBy", "[20]")
Glyphs.registerDefault("com.mekkablue.FindAndReplaceInLayerNames.allGlyphs", True)
self.w.searchFor.set( Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.searchFor"] )
self.w.replaceBy.set( Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.replaceBy"] )
self.w.allGlyphs.set( Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.allGlyphs"] )
except:
return False
return True
def buttonCallback(self, sender):
thisFont = Glyphs.font
if Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.allGlyphs"]:
glyphsToProcess = thisFont.glyphs
else:
selectedLayers = thisFont.selectedLayers
glyphsToProcess = [l.parent for l in selectedLayers]
searchFor = Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.searchFor"]
replaceBy = Glyphs.defaults["com.mekkablue.FindAndReplaceInLayerNames.replaceBy"]
replaceCount = 0
thisFont.disableUpdateInterface()
for thisGlyph in glyphsToProcess:
for thisLayer in thisGlyph.layers:
# do not change names of master layers:
if thisLayer.layerId != thisLayer.associatedFontMaster().id:
if thisLayer.name is None:
print("Warning! Empty layer name in: %s" % thisGlyph.name)
elif searchFor in thisLayer.name:
thisLayer.name = thisLayer.name.replace( searchFor, replaceBy )
print("%s: %s" % ( thisGlyph.name, thisLayer.name ))
replaceCount += 1
thisFont.enableUpdateInterface()
if replaceCount > 0:
Message(title="Replaced successfully", message="Replaced %i occurrences."%replaceCount, OKButton=None)
else:
Message(title="Nothing replaced", message="Could not find any occurrences of search string in the processed layers.", OKButton=None)
replaceInLayerNames()
|
the-stack_0_12119 | from collections import defaultdict, deque
def find_tree_diameter(g, n):
"""
Standard awesome problem
So for each node, I want to find the maximum distance to another node
:param g:
:return:
"""
# We can approach this question in the binary tree way (or) the graph way
# Tree - Post order traversal - This in itself is DFS template only
# Graph - Use routine DFS - Remember - tree is an undirected graph
diameter_of_tree = 0
for i in range(1, n + 1):
print(f"Considering {i} as root")
curr_max_length = 0
q = deque()
q.append((i, 0))
visited = set()
while q:
print("The queue is:", q)
node, length = q.pop()
visited.add(node)
curr_max_length = max(length, curr_max_length)
for nei in g[node]:
if nei not in visited:
q.append((nei, length + 1))
print(f"The max_length for {i} is: {curr_max_length}")
diameter_of_tree = max(curr_max_length, diameter_of_tree)
print("*****************************************************")
return diameter_of_tree
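# Worked example: for the path graph 1-2-3-4 (n=4, edges 1-2, 2-3, 3-4) the per-root maximum
# distances are 3, 2, 2, 3, so find_tree_diameter returns 3 (edges on the longest path).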
if __name__ == "__main__":
n = int(input())
g = defaultdict(list)
for i in range(0, n - 1):
u, v = map(int, input().split())
g[u].append(v)
g[v].append(u)
# print(g)
result = find_tree_diameter(g, n)
print(result)
|
the-stack_0_12120 | #MEGA SENA
#pergunte qntos jogos serão gerados e sorteie 6 números entre 1 a 60.
#cadastre td em uma lista composta.
from random import sample
print('--'*18)
print(f'{"JOGO DA MEGA SENA":^36}')
print('--'*18)
jogos = int(input('Quantos jogos você quer que eu sorteie? '))
matriz = []
for i in range(jogos):
    # sample() guarantees 6 distinct numbers per game, as in a real Mega Sena draw
    matriz.append(sorted(sample(range(1, 61), 6)))
    print(f'Jogo {i + 1}: {matriz[i]}')
|
the-stack_0_12121 | fcc_sources = {
"GFC": {
"asset": "UMD/hansen/global_forest_change_2020_v1_8",
"start": 2000,
"end": 2020,
},
"TMF": {
"asset": "projects/JRC/TMF/v1_2021/AnnualChanges",
"start": 1990,
"end": 2021,
},
}
"source of the forest change cover dataset"
|
the-stack_0_12122 | # removes all files created during testing
import glob
import os
paths = []
for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
paths += glob.glob('data/' + pattern)
for path in paths:
os.unlink(path)
|
the-stack_0_12123 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Friday Feb 20 2020
This code was implemented by
Louis Weyland, Floris Fok and Julien Fer
"""
# Import built-in libraries
import time
import math
import statistics
import multiprocessing
# import 3th party libraries
import numpy as np
import matplotlib.pyplot as plt
import yfinance as yf
from collections import defaultdict
# Import onw modules
from Binomial_tree import BinTreeOption, BlackScholes
def binomial_tree_1(N, T, S, K, r,market,option_type,save_plot=False):
'''
:param N: number of steps
:param T: period
:param S: stock price
    :param K: strike price
:param r: interest rate
:param sigma: volatility
:param market: Eu or USA
:return: price of option & delta
'''
# Analyse various levels of volatility
sigmas = np.linspace(0.01, 0.99, 100)
trees= [
BinTreeOption(N, T, S, s, r, K, market, option_type)
for s in sigmas
]
    bs_list = [BlackScholes(T, S, K, r, s) for s in sigmas]
    # Determine call prices
    call_prices = defaultdict(list)
    for tree, bs in zip(trees, bs_list):
call_prices["Binomial tree"].append(tree.determine_price())
call_prices["Black Scholes"].append(bs.call_price())
# Make plot
plt.figure()
plt.plot(sigmas, [i[0] for i in call_prices["Binomial tree"]], '--',linewidth=3,label="Binomial tree")
plt.plot(sigmas, call_prices["Black Scholes"], label="Black Scholes")
plt.xlabel("Volatility (%) ",fontsize=12,fontweight='bold')
plt.ylabel("Price",fontsize=12,fontweight='bold')
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.title(market+" "+option_type+" option price for various levels of volatility",fontsize=14,fontweight='bold')
plt.legend()
if save_plot:
plt.savefig("figures/"+market+"_"+option_type+"_volatility",dpi=300)
plt.show()
plt.close()
def worker(tree):
return tree.determine_price()
def binomial_tree_2( T, S, K, r, sigma, market, option_type,save_plot=False,run_time=True):
'''
:param T: period
:param S: stock price
    :param K: strike price
:param r: interest rate
:param sigma: volatility
:param market: Eu or USA
:return: price of option & delta
'''
# Analyse time steps
steps = list(range(20, 500,5))
trees = [
BinTreeOption(step, T, S, sigma, r, K, market, option_type)
for step in steps
]
NUM_CORE = 2
pool = multiprocessing.Pool(NUM_CORE)
prices_trees = pool.map(worker, ((tree) for tree in trees))
pool.close()
pool.join()
bs = BlackScholes(T, S, K, r, sigma)
if option_type=='call':
bs_price = bs.call_price()
else:
bs_price = bs.put_price()
print("Black Scholes option price =",bs_price)
prices_bs = [bs_price] * len(steps)
# Make plot
plt.figure()
plt.plot(steps, [i[0] for i in prices_trees], label="Binomial tree")
plt.plot(steps, prices_bs, label="Black Scholes")
plt.xlabel("Time steps (a.u.)",fontsize=12,fontweight='bold')
plt.ylabel("Price",fontsize=12,fontweight='bold')
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.title(market+" "+option_type+" option price for increasing time steps",fontsize=14,fontweight='bold')
plt.legend()
if save_plot:
plt.savefig("figures/"+market+"_"+option_type+"_time_steps",dpi=300)
# Get the running time
if run_time:
repetition = 20
running_time_matrix = np.zeros((len(steps) + 1, repetition))
steps = list(range(1, 100))
for i in range(repetition):
for step in steps:
start_time=time.time()
tree =BinTreeOption(step, T, S, sigma, r, K, market, option_type)
running_time=(time.time()-start_time)*100
running_time_matrix[step][i]=running_time
mean_running_time=np.mean(running_time_matrix,1)
mean_running_time=np.delete(mean_running_time,0)
plt.figure()
plt.plot(steps, mean_running_time, label="Running time")
plt.xlabel("Time steps (a.u.)",fontsize=12,fontweight='bold')
plt.ylabel("Running Time (ms)",fontsize=12,fontweight='bold')
plt.legend()
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.title("Running time vs. Steps",fontsize=14,fontweight='bold')
if save_plot:
plt.savefig("figures/"+market+"_"+option_type+"_running_time",dpi=300)
plt.show()
plt.close()
def binomial_tree_3(N,T, S, K, r, market, option_type,save_plot=True):
'''
:param N: number of steps
:param T: period
:param S: stock price
    :param K: strike price
:param r: interest rate
:param sigma: volatility
:param market: Eu or USA
:return: price of option & delta
'''
# Analyse various levels of volatility
sigmas = np.linspace(0.01, 0.99, 100)
trees = [
BinTreeOption(N, T, S, s, r, K, market, option_type)
for s in sigmas
]
bs_list = [BlackScholes(T, S, K, r, s) for s in sigmas]
call_prices = defaultdict(list)
for tree, bs in zip(trees, bs_list):
call_prices["Binomial tree"].append(tree.determine_price())
# Make plot
plt.figure()
plt.plot(sigmas, [i[1] for i in call_prices["Binomial tree"]],'--',linewidth=3, label="Binomial tree")
plt.plot(sigmas, [i[2] for i in call_prices["Binomial tree"]], label="Black Scholes")
plt.xlabel("Volatility (%) ",fontsize=12,fontweight='bold')
plt.ylabel(r"$\Delta$ (%)",fontsize=12,fontweight='bold')
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.title(market+" "+option_type+r" $\Delta$ for various levels of volatility",fontsize=14,fontweight='bold')
plt.legend()
if save_plot:
plt.savefig("figures/"+market+"_"+option_type+"_volatility_delta",dpi=300)
plt.show()
plt.close()
def wiener_process(T, S0, K, r, sigma, steps=1,save_plot=True):
"""
:param T: Period
:param S0: Stock price at spot time
:param K: Strike price
:param r: interest rate
:param sigma: volatility
:param steps: number of steps
:param save_plot: to save the plot
:return: returns a plot of a simulated stock movement
"""
bs = BlackScholes(1, 100, 99, 0.06, 0.2, steps=365)
bs.create_price_path()
plt.figure()
plt.plot(bs.price_path)
plt.xlabel("Days",fontsize=12,fontweight='bold')
plt.ylabel("Stock price",fontsize=12,fontweight='bold')
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.title("Stock price simulated based on the Wiener process",fontsize=14,fontweight='bold')
if save_plot:
plt.savefig("figures/"+"wiener_process",dpi=300)
plt.show()
plt.close()
def real_stock_data():
years = 1
rate = 0.06
def fill_year(data, open_close='Open'):
if open_close == "Open":
time_serie = data.Open
elif open_close == 'Close':
time_serie = data.Close
else:
print(open_close, 'is not knows')
return None
n_days_in_years = 365
days = np.zeros(n_days_in_years)
i = 0
s = 0
for start, timestamp in enumerate(time_serie.index):
if timestamp.weekday() == 0:
break
for value in time_serie[start:]:
days[i] = value
i += 1
s += 1
if s % 5 == 0:
days[i] = value
days[i + 1] = value
i += 2
if i == 365:
break
return days
def get_data(stock='AAPL', frm='2019-01-01', till='2020-02-01'):
data = yf.download(stock, frm, till)
return data
def get_implied_volatility(data):
volatility = np.std(data) / np.mean(data)
return volatility
def plot_price_path(B, title, hedge_plot=True):
fig, ax1 = plt.subplots()
x_price = [i / B.steps for i in range(B.steps)]
x_price = [i for i in range(1, 366)]
color = 'tab:red'
ax1.set_xlabel('Days',fontsize=12,fontweight='bold')
ax1.set_ylabel('Price', color=color,fontsize=12,fontweight='bold')
ax1.plot(x_price, B.price_path, color=color,
label="Discritized Black Scholes")
ax1.tick_params(axis='y', labelcolor=color)
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
if hedge_plot:
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
print('plot2')
# we already handled the x-label with ax1
ax2.set_ylabel('Delta', color=color,fontsize=12,fontweight='bold')
ax2.scatter(B.x_hedge, B.delta_list, color=color, label='Hedge delta')
ax2.plot(B.x_hedge, B.delta_list, linestyle='--', color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.yticks(fontweight='bold')
plt.title(title,fontsize=14,fontweight='bold')
plt.tight_layout()
plt.savefig("figures/"+title+'.png',dpi=300)
data = fill_year(get_data(stock='AAPL'), open_close='Open')
sigma = get_implied_volatility(data)
steps = 365
B = BlackScholes(years, data[0], data[0] - 1, rate, sigma, steps)
B.price_path = data
print('profit of Apple stocks:', B.create_hedge(52,hedge_setting='call'))
B.x_hedge = [i * 7 for i in range(0, 52)]
plot_price_path(B, 'Apple stocks simulation')
data = fill_year(get_data(stock='RDS-A'), open_close='Open')
sigma = get_implied_volatility(data)
B = BlackScholes(years, data[0], data[0] - 1, rate, sigma, steps)
B.price_path = data
    print('profit of Shell simulation:', B.create_hedge(52))
B.x_hedge = [i * 7 for i in range(0, 52)]
plot_price_path(B, 'Shell stocks simulation')
def profit_histogram():
steps = 365
years = 1
start_price = 100
strike_price = 99
rate = 0.06
volatility = 0.2
prof=np.zeros(1000)
for i in range(1000):
B = BlackScholes(years, start_price, strike_price, rate, volatility, steps)
prof[i] = B.create_hedge(steps)
print("Daily: Standard Deviation ",statistics.stdev(prof))
fig = plt.figure()
plt.hist(prof, bins=20, label=f"with mean: {round(np.mean(prof), 3)}")
plt.xlabel('Profit',fontsize=12,fontweight='bold')
plt.ylabel('Frequency',fontsize=12,fontweight='bold')
plt.title('Hedging delta every day',fontsize=14,fontweight='bold')
plt.legend()
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.tight_layout()
fig.savefig('figures/hedgedeltaday.png', dpi=300)
steps = 52
prof= np.zeros(1000)
for i in range(1000):
B = BlackScholes(years, start_price, strike_price, rate, volatility, steps)
prof[i] = B.create_hedge(steps)
print("Weekly: Standard Deviation ", statistics.stdev(prof))
fig = plt.figure()
plt.hist(prof, bins=20, label=f"with mean: {round(np.mean(prof), 3)}")
plt.xlabel('Profit',fontsize=12,fontweight='bold')
plt.ylabel('Frequency',fontsize=12,fontweight='bold')
plt.title('Hedging delta every week',fontsize=14,fontweight='bold')
plt.legend()
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.tight_layout()
fig.savefig('figures/hedgedeltaweek.png', dpi=300)
def all_profit_histograms():
price_steps = 365
years = 1
start_price = 100
strike_price = 99
rate = 0.06
volatility = 0.2
fig = plt.figure()
steps_array = []
for steps in [10, 50, 100, 200, 300]:
prof = np.zeros(1000)
for i in range(1000):
B = BlackScholes(years, start_price, strike_price, rate, volatility, price_steps)
prof[i] = B.create_hedge(steps)
print("Steps ",steps,": Standard Deviation: ",statistics.stdev(prof))
plt.hist(prof, bins=20, label=f'n={steps}')
steps_array.append(np.mean(prof))
plt.xlabel('Profit',fontsize=12,fontweight='bold')
plt.ylabel('Frequency',fontsize=12,fontweight='bold')
plt.title('Hedging delta different intervals',fontsize=14,fontweight='bold')
plt.xticks(fontweight='bold')
plt.yticks(fontweight='bold')
plt.legend()
plt.tight_layout()
fig.savefig('figures/different_steps.png')
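# Editor's note: the module defines analysis helpers but no entry point. The driver
# below is an illustrative addition (not part of the original file); it only runs the
# Black-Scholes based demos, whose key parameters are hard-coded inside the functions.
if __name__ == "__main__":
    wiener_process(1, 100, 99, 0.06, 0.2, steps=365, save_plot=False)
    profit_histogram()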
|
the-stack_0_12124 | """license: Apache License 2.0, see LICENSE for more details."""
import uuid
import time
from nose.tools import eq_
from kazoo.testing import KazooTestCase
from kazoo.recipe.partitioner import PartitionState
class KazooPartitionerTests(KazooTestCase):
def setUp(self):
super(KazooPartitionerTests, self).setUp()
self.path = "/" + uuid.uuid4().hex
def test_party_of_one(self):
partitioner = self.client.SetPartitioner(
self.path, set=(1, 2, 3), time_boundary=0.2)
partitioner.wait_for_acquire(14)
eq_(partitioner.state, PartitionState.ACQUIRED)
eq_(list(partitioner), [1, 2, 3])
partitioner.finish()
def test_party_of_two(self):
partitioners = [self.client.SetPartitioner(self.path, (1, 2),
identifier="p%s" % i, time_boundary=0.2)
for i in range(2)]
partitioners[0].wait_for_acquire(14)
partitioners[1].wait_for_acquire(14)
eq_(list(partitioners[0]), [1])
eq_(list(partitioners[1]), [2])
partitioners[0].finish()
time.sleep(0.1)
eq_(partitioners[1].release, True)
partitioners[1].finish()
def test_party_expansion(self):
partitioners = [self.client.SetPartitioner(self.path, (1, 2, 3),
identifier="p%s" % i, time_boundary=0.2)
for i in range(2)]
partitioners[0].wait_for_acquire(14)
partitioners[1].wait_for_acquire(14)
eq_(partitioners[0].state, PartitionState.ACQUIRED)
eq_(partitioners[1].state, PartitionState.ACQUIRED)
eq_(list(partitioners[0]), [1, 3])
eq_(list(partitioners[1]), [2])
# Add another partition, wait till they settle
partitioners.append(self.client.SetPartitioner(self.path, (1, 2, 3),
identifier="p2", time_boundary=0.2))
time.sleep(0.1)
eq_(partitioners[0].release, True)
for p in partitioners[:-1]:
p.release_set()
for p in partitioners:
p.wait_for_acquire(14)
eq_(list(partitioners[0]), [1])
eq_(list(partitioners[1]), [2])
eq_(list(partitioners[2]), [3])
for p in partitioners:
p.finish()
def test_more_members_than_set_items(self):
partitioners = [self.client.SetPartitioner(self.path, (1,),
identifier="p%s" % i, time_boundary=0.2)
for i in range(2)]
partitioners[0].wait_for_acquire(14)
partitioners[1].wait_for_acquire(14)
eq_(partitioners[0].state, PartitionState.ACQUIRED)
eq_(partitioners[1].state, PartitionState.ACQUIRED)
eq_(list(partitioners[0]), [1])
eq_(list(partitioners[1]), [])
for p in partitioners:
p.finish()
def test_party_session_failure(self):
partitioner = self.client.SetPartitioner(
self.path, set=(1, 2, 3), time_boundary=0.2)
partitioner.wait_for_acquire(14)
eq_(partitioner.state, PartitionState.ACQUIRED)
# simulate session failure
partitioner._fail_out()
partitioner.release_set()
self.assertTrue(partitioner.failed)
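# Editor's note: for reference, the canonical (non-test) consumption loop for
# SetPartitioner, following the usual kazoo documentation pattern, looks roughly like
# the sketch below; this is an illustrative addition, not part of the original test module.
def _example_partitioner_loop(client, path, workset):
    partitioner = client.SetPartitioner(path, set=workset)
    while True:
        if partitioner.failed:
            raise RuntimeError("partitioning session failed")
        elif partitioner.release:
            # give up our partitions so the party can rebalance
            partitioner.release_set()
        elif partitioner.acquired:
            for item in partitioner:
                pass  # work on the items owned by this member
        elif partitioner.allocating:
            partitioner.wait_for_acquire()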
|
the-stack_0_12125 | # Author: Muratcan Cicek, https://users.soe.ucsc.edu/~cicekm/
from HeadCursorMapping.MappingABC import MappingABC
from InputEstimators.HeadPoseEstimators.MuratcansHeadGazer import MuratcansHeadGazer
from InputEstimators.HeadPoseEstimators.HeadPoseEstimatorABC import HeadPoseEstimatorABC
from abc import abstractmethod
class StaticMapping(MappingABC):
def _calculate(self):
inputRanges = self._inputBoundaries.getRanges()
outputRanges = self._outputBoundaries.getRanges()
ratios = self._inputBoundaries.getVolumeAbsRatio(self._inputValues)
if isinstance(self._inputEstimator, HeadPoseEstimatorABC) and \
not isinstance(self._inputEstimator, MuratcansHeadGazer):
            ratios[0], ratios[1] = ratios[1], ratios[0]
i = self._outputValues.shape[0]
self._outputValues = ratios[:i] * outputRanges[:i]
return self._outputValues |
the-stack_0_12126 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
import torch.distributed as dist
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from fairseq.optim.fused_adam import get_fused_adam_class
from omegaconf import II, DictConfig
logger = logging.getLogger(__name__)
@dataclass
class FairseqAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
use_old_adam: bool = field(
default=False, metadata={"help": "Use fairseq.optim.adam.Adam"}
)
# TODO common vars below in parent
tpu: bool = II("common.tpu")
lr: List[float] = II("optimization.lr")
@register_optimizer("adam", dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
"""Adam optimizer for fairseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if getattr(cfg, "tpu", False):
# on TPUs we use the Adam defined here, since it
# automatically casts gradients to FP32
self._optimizer = Adam(params, **self.optimizer_config)
elif use_fused_adam:
logger.info("using FusedAdam")
self._optimizer = fused_adam_cls(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
def average_params(self):
"""Reduce Params is only used during BMUF distributed training."""
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
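# Editor's note: a minimal, self-contained smoke test of the standalone Adam class
# above (an illustrative addition, not part of the original fairseq module); it only
# exercises the optimizer on a toy parameter, without the fairseq config machinery.
def _adam_smoke_test():
    param = torch.nn.Parameter(torch.zeros(3))
    optimizer = Adam([param], lr=1e-3)
    loss = ((param - 1.0) ** 2).sum()
    loss.backward()
    optimizer.step()
    return param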
|
the-stack_0_12127 | from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.api import PyObject
class AppTestStructSeq(AppTestCpythonExtensionBase):
def test_StructSeq(self):
module = self.import_extension('foo',
prologue="""
#include <structseq.h>
static PyTypeObject PyDatatype;
static PyStructSequence_Field Data_fields[] = {
{"value", "value_doc"},
{"value2", "value_doc"},
{"text", "text_doc"},
{"other", "other_doc"},
{NULL} /* Sentinel */
};
static PyStructSequence_Desc Data_desc = {
"cpyext_test.data", /*name*/
"data_doc", /*doc*/
Data_fields, /*fields*/
3, /*n_in_sequence*/
};
""",
functions=[
("new_structdata", "METH_NOARGS",
"""
PyObject *seq;
PyStructSequence_InitType(&PyDatatype, &Data_desc);
if (PyErr_Occurred()) return NULL;
seq = PyStructSequence_New(&PyDatatype);
if (!seq) return NULL;
PyStructSequence_SET_ITEM(seq, 0, PyLong_FromLong(42));
PyStructSequence_SET_ITEM(seq, 1, PyLong_FromLong(43));
PyStructSequence_SET_ITEM(seq, 2, PyUnicode_FromString("hello"));
PyStructSequence_SET_ITEM(seq, 3, PyUnicode_FromString("other"));
Py_DECREF(&PyDatatype);
return seq;
""")])
s = module.new_structdata()
assert tuple(s) == (42, 43, 'hello')
assert s.value == 42
assert s.text == 'hello'
assert s.other == 'other'
assert 'hello' in s
assert 'other' not in s
del s
|
the-stack_0_12128 | #!/usr/bin/python
"""
Driver for PDB2PQR
This module takes a PDB file as input and performs optimizations
before yielding a new PDB-style file as output.
Ported to Python by Todd Dolinsky ([email protected])
Washington University in St. Louis
Parsing utilities provided by Nathan A. Baker ([email protected])
Washington University in St. Louis
Copyright (c) 2002-2010, Jens Erik Nielsen, University College Dublin;
Nathan A. Baker, Washington University in St. Louis; Paul Czodrowski &
Gerhard Klebe, University of Marburg
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of University College Dublin, Washington University in
St. Louis, or University of Marburg nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__date__ = "5 April 2010"
__author__ = "Todd Dolinsky, Nathan Baker, Jens Nielsen, Paul Czodrowski, Jan Jensen, Samir Unni, Yong Huang"
__version__ = "1.6"
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from main import mainCommand
from main_cgi import mainCGI
from src.aconf import *
if __name__ == "__main__":
""" Determine if called from command line or CGI """
if not ("REQUEST_METHOD" in os.environ):
# Append Numeric/Numpy path to sys.path if the user specified a non-standard location during configuration
package_path = PACKAGE_PATH
if package_path != "":
sys.path.extend(package_path.split(":"))
mainCommand(sys.argv)
else:
mainCGI()
|
the-stack_0_12133 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface and utility functions to XLA.
This module wraps the XLA client(s) and builders to standardize their interfaces
and provide some automatic type mapping logic for converting between Numpy and
XLA. There are also a handful of related casting utilities.
"""
from functools import partial, lru_cache
import os
import threading
from typing import Any, Dict, List, Optional, Tuple, Union
import warnings
from absl import logging
# Disable "WARNING: Logging before flag parsing goes to stderr." message
logging._warn_preinit_stderr = 0
import jax._src.lib
from jax._src.config import flags, bool_env
from . import tpu_driver_client
from . import xla_client
from jax._src import util, traceback_util
import numpy as np
iree: Optional[Any]
try:
import jax._src.iree as iree # type: ignore
except ModuleNotFoundError:
iree = None
traceback_util.register_exclusion(__file__)
xops = xla_client.ops
FLAGS = flags.FLAGS
# TODO(phawkins): Remove jax_xla_backend.
flags.DEFINE_string(
'jax_xla_backend', '',
'Deprecated, please use --jax_platforms instead.')
flags.DEFINE_string(
'jax_backend_target', '',
'Either "local" or "rpc:address" to connect to a remote service target.')
# TODO(skye): warn when this is used once we test out --jax_platforms a bit
flags.DEFINE_string(
'jax_platform_name',
os.getenv('JAX_PLATFORM_NAME', '').lower(),
'Deprecated, please use --jax_platforms instead.')
flags.DEFINE_string(
'jax_platforms',
os.getenv('JAX_PLATFORMS', '').lower(),
'Comma-separated list of platform names specifying which platforms jax '
'should attempt to initialize. The first platform in the list that is '
'successfully initialized will be used as the default platform. For '
'example, --jax_platforms=cpu,gpu means that CPU and GPU backends will be '
'initialized, and the CPU backend will be used unless otherwise specified; '
'--jax_platforms=cpu means that only the CPU backend will be initialized. '
'By default, jax will try to initialize all available platforms and will '
'default to GPU or TPU if available, and fallback to CPU otherwise.')
flags.DEFINE_bool(
'jax_disable_most_optimizations',
bool_env('JAX_DISABLE_MOST_OPTIMIZATIONS', False),
'Try not to do much optimization work. This can be useful if the cost of '
'optimization is greater than that of running a less-optimized program.')
def get_compile_options(
num_replicas: int,
num_partitions: int,
device_assignment=None,
use_spmd_partitioning: bool = True,
) -> xla_client.CompileOptions:
"""Returns the compile options to use, as derived from flag values.
Args:
num_replicas: Number of replicas for which to compile.
num_partitions: Number of partitions for which to compile.
device_assignment: Optional tuple of integers indicating the assignment of
logical replicas to physical devices (default inherited from
xla_client.CompileOptions). Must be consistent with `num_replicas` and
`num_partitions`.
use_spmd_partitioning: boolean indicating whether to enable SPMD or MPMD
partitioning in XLA.
"""
compile_options = xla_client.CompileOptions()
compile_options.num_replicas = num_replicas
compile_options.num_partitions = num_partitions
build_options = compile_options.executable_build_options
build_options.use_spmd_partitioning = use_spmd_partitioning
if device_assignment is not None:
logging.vlog(
2,
'get_compile_options: num_replicas=%s num_partitions=%s device_assignment=%s',
num_replicas, num_partitions, device_assignment)
device_assignment = np.array(device_assignment)
# Allow 1D device assignment if num_partitions is 1.
if (device_assignment.ndim == 1) and (num_partitions == 1):
device_assignment = device_assignment[:, None]
if num_replicas != device_assignment.shape[0]:
msg = 'device_assignment does not match num_replicas: {} vs {}.'
raise ValueError(msg.format(device_assignment, num_replicas))
if num_partitions != device_assignment.shape[1]:
msg = 'device_assignment does not match num_partitions: {} vs {}.'
raise ValueError(msg.format(device_assignment, num_partitions))
device_assignment = xla_client.DeviceAssignment.create(device_assignment)
assert device_assignment.replica_count() == num_replicas
assert device_assignment.computation_count() == num_partitions
compile_options.device_assignment = device_assignment
debug_options = compile_options.executable_build_options.debug_options
if jax._src.lib.cuda_path is not None:
debug_options.xla_gpu_cuda_data_dir = jax._src.lib.cuda_path
if FLAGS.jax_disable_most_optimizations:
debug_options.xla_backend_optimization_level = 0
debug_options.xla_llvm_disable_expensive_passes = True
debug_options.xla_test_all_input_layouts = False
return compile_options
# Backends
def _make_tpu_driver_client():
if tpu_driver_client is None:
logging.info("Remote TPU is not linked into jax; skipping remote TPU.")
return None
if FLAGS.jax_backend_target is None:
logging.info("No --jax_backend_target was provided; skipping remote TPU.")
return None
return tpu_driver_client.TpuBackend.create(worker=FLAGS.jax_backend_target)
def tpu_client_timer_callback(timer_secs: float):
def _log_warning():
warnings.warn(
f'TPU backend initialization is taking more than {timer_secs} seconds. '
'Did you run your code on all TPU hosts? '
'See https://jax.readthedocs.io/en/latest/multi_process.html '
'for more information.')
# Will log a warning after `timer_secs`.
t = threading.Timer(timer_secs, _log_warning)
t.start()
try:
client = xla_client.make_tpu_client()
finally:
t.cancel()
return client
# Backends, in increasing order of preference.
# We have no particular opinion about how "backends" relate to "devices". For
# example, there could be multiple backends that provide the same kind of
# device.
_backend_factories = {}
_default_backend = None
_backends : Dict[str, Any] = {}
_backends_errors : Dict[str, str] = {}
_backend_lock = threading.Lock()
def register_backend_factory(name, factory, *, priority=0):
with _backend_lock:
if name in _backends:
raise RuntimeError(f"Backend {name} already initialized")
_backend_factories[name] = (factory, priority)
register_backend_factory('interpreter', xla_client.make_interpreter_client,
priority=-100)
register_backend_factory('cpu',
partial(xla_client.make_cpu_client, use_tfrt=True),
priority=0)
register_backend_factory('tpu_driver', _make_tpu_driver_client,
priority=100)
register_backend_factory('gpu', xla_client.make_gpu_client,
priority=200)
register_backend_factory(
'tpu', partial(tpu_client_timer_callback, timer_secs=60.0), priority=300)
if iree is not None:
register_backend_factory("iree", iree.iree_client_factory, priority=-100)
def backends():
global _backends
global _backends_errors
global _default_backend
with _backend_lock:
if _backends:
return _backends
if FLAGS.jax_platforms:
platforms = FLAGS.jax_platforms.split(",")
priorities = range(len(platforms), 0, -1)
platforms_and_priorites = zip(platforms, priorities)
else:
platforms_and_priorites = (
(platform, priority) for platform, (_, priority)
in _backend_factories.items())
default_priority = -1000
for platform, priority in platforms_and_priorites:
try:
backend = _init_backend(platform)
_backends[platform] = backend
if priority > default_priority:
_default_backend = backend
default_priority = priority
except Exception as err:
if platform in ('cpu', 'interpreter'):
# We always expect the CPU and interpreter backends to initialize
# successfully.
raise
else:
# If the backend isn't built into the binary, or if it has no devices,
# we expect a RuntimeError.
logging.info("Unable to initialize backend '%s': %s", platform,
err)
_backends_errors[platform] = str(err)
continue
if _default_backend.platform == "cpu" and FLAGS.jax_platform_name != 'cpu':
logging.warning('No GPU/TPU found, falling back to CPU. '
'(Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)')
return _backends
def _init_backend(platform):
factory, unused_priority = _backend_factories.get(platform, (None, None))
if factory is None:
raise RuntimeError(f"Unknown backend '{platform}'")
logging.vlog(1, "Initializing backend '%s'" % platform)
backend = factory()
# TODO(skye): consider raising more descriptive errors directly from backend
# factories instead of returning None.
if backend is None:
raise RuntimeError(f"Could not initialize backend '{platform}'")
if backend.device_count() == 0:
raise RuntimeError(f"Backend '{platform}' provides no devices.")
util.distributed_debug_log(("Initialized backend", backend.platform),
("process_index", backend.process_index()),
("device_count", backend.device_count()),
("local_devices", backend.local_devices()))
logging.vlog(1, "Backend '%s' initialized" % platform)
return backend
def _get_backend_uncached(platform=None):
# TODO(mattjj,skyewm): remove this input polymorphism after we clean up how
# 'backend' values are handled
if not isinstance(platform, (type(None), str)):
return platform
bs = backends()
platform = (platform or FLAGS.jax_xla_backend or FLAGS.jax_platform_name
or None)
if platform is not None:
backend = bs.get(platform, None)
if backend is None:
if platform in _backends_errors:
raise RuntimeError(f"Backend '{platform}' failed to initialize: "
f"{_backends_errors[platform]}")
raise RuntimeError(f"Unknown backend {platform}")
return backend
else:
return _default_backend
@lru_cache(maxsize=None) # don't use util.memoize because there is no X64 dependence.
def get_backend(platform=None):
return _get_backend_uncached(platform)
def get_device_backend(device=None):
"""Returns the Backend associated with `device`, or the default Backend."""
if device is not None:
return device.client
return get_backend()
def device_count(backend: Optional[str] = None) -> int:
"""Returns the total number of devices.
On most platforms, this is the same as :py:func:`jax.local_device_count`.
However, on multi-process platforms where different devices are associated
with different processes, this will return the total number of devices across
all processes.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
Number of devices.
"""
return int(get_backend(backend).device_count())
def local_device_count(backend: Optional[str] = None) -> int:
"""Returns the number of devices addressable by this process."""
return int(get_backend(backend).local_device_count())
def devices(backend: Optional[str] = None) -> List[xla_client.Device]:
"""Returns a list of all devices for a given backend.
.. currentmodule:: jaxlib.xla_extension
Each device is represented by a subclass of :class:`Device` (e.g.
:class:`CpuDevice`, :class:`GpuDevice`). The length of the returned list is
equal to ``device_count(backend)``. Local devices can be identified by
comparing :attr:`Device.process_index` to the value returned by
:py:func:`jax.process_index`.
If ``backend`` is ``None``, returns all the devices from the default backend.
The default backend is generally ``'gpu'`` or ``'tpu'`` if available,
otherwise ``'cpu'``.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
List of Device subclasses.
"""
return get_backend(backend).devices()
def default_backend() -> str:
"""Returns the platform name of the default XLA backend."""
return get_backend(None).platform
def local_devices(process_index: Optional[int] = None,
backend: Optional[str] = None,
host_id: Optional[int] = None) -> List[xla_client.Device]:
"""Like :py:func:`jax.devices`, but only returns devices local to a given process.
If ``process_index`` is ``None``, returns devices local to this process.
Args:
process_index: the integer index of the process. Process indices can be
retrieved via ``len(jax.process_count())``.
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
List of Device subclasses.
"""
if host_id is not None:
warnings.warn(
"The argument to jax.local_devices has been renamed from `host_id` to "
"`process_index`. This alias will eventually be removed; please update "
"your code.")
process_index = host_id
if process_index is None:
process_index = get_backend(backend).process_index()
if not (0 <= process_index < process_count()):
raise ValueError(f"Unknown process_index {process_index}")
return [d for d in devices(backend) if d.process_index == process_index]
def process_index(backend: Optional[str] = None) -> int:
"""Returns the integer process index of this process.
On most platforms, this will always be 0. This will vary on multi-process
platforms though.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
Returns:
Integer process index.
"""
return get_backend(backend).process_index()
# TODO: remove this sometime after jax 0.2.13 is released
def host_id(backend=None):
warnings.warn(
"jax.host_id has been renamed to jax.process_index. This alias "
"will eventually be removed; please update your code.")
return process_index(backend)
def process_count(backend: Optional[str] = None) -> int:
"""Returns the number of JAX processes associated with the backend."""
return max(d.process_index for d in devices(backend)) + 1
# TODO: remove this sometime after jax 0.2.13 is released
def host_count(backend=None):
warnings.warn(
"jax.host_count has been renamed to jax.process_count. This alias "
"will eventually be removed; please update your code.")
return process_count(backend)
# TODO: remove this sometime after jax 0.2.13 is released
def host_ids(backend=None):
warnings.warn(
"jax.host_ids has been deprecated; please use range(jax.process_count()) "
"instead. jax.host_ids will eventually be removed; please update your "
"code.")
return list(range(process_count(backend)))
### utility functions
def parameter(builder, num, shape, name=None, replicated=None):
if name is None:
name = ''
if replicated is None:
replicated = []
elif isinstance(replicated, bool):
replicated = [replicated] * shape.leaf_count()
return xops.Parameter(builder, num,
shape.with_major_to_minor_layout_if_absent(), name,
replicated)
# HLO instructions optionally can be annotated to say how the output should be
# spatially partitioned (represented in XLA as OpSharding protos, see
# _sharding_to_proto). For array outputs, the annotation is either an int per
# dimension specifying the number of ways that dimension divided (i.e. the total
# number of shards is the product), or None to indicate the array should be
# replicated. Tuple outputs are represented as tuples thereof. XLA supports
# arbitrary tuple nesting, but JAX only uses one level of tupling (and our type
# checkers don't support recursive types), so we only represent one level of
# nesting in this type definition.
SpatialSharding = Union[Tuple[int, ...],
None,
Tuple[Union[Tuple[int, ...], None], ...]]
def _sharding_to_proto(sharding: SpatialSharding):
"""Converts a SpatialSharding to an OpSharding.
See
https://github.com/tensorflow/tensorflow/blob/main/tensorflow/compiler/xla/xla_data.proto#L601
for details on the OpSharding proto.
"""
proto = xla_client.OpSharding()
if isinstance(sharding, tuple) and not isinstance(sharding[0], int):
assert all(s is None or isinstance(s, tuple) for s in sharding)
return tuple_sharding_proto(list(map(_sharding_to_proto, sharding))) # type: ignore
if sharding is None:
proto.type = xla_client.OpSharding.Type.REPLICATED
else:
proto.type = xla_client.OpSharding.Type.OTHER
proto.tile_assignment_dimensions = list(sharding)
proto.tile_assignment_devices = list(range(np.product(sharding)))
return proto
def tuple_sharding_proto(elems):
proto = xla_client.OpSharding()
assert all(isinstance(e, type(proto)) for e in elems)
proto.type = xla_client.OpSharding.Type.TUPLE
proto.tuple_shardings = elems
return proto
def set_sharding_proto(builder, op, sharding_proto):
"""Uses CustomCall to annotate a value as sharded."""
# "Sharding" is a built-in custom call target that acts like an identity
# function, and is used to attach an OpSharding to.
return with_sharding_proto(builder, sharding_proto, xops.CustomCall,
builder, b"Sharding", [op], builder.get_shape(op))
def with_sharding_proto(builder, sharding_proto, op_fn, *args, **kwargs):
"""Builds op_fn(*args, **kwargs) with sharding annotation."""
builder.set_sharding(sharding_proto)
try:
return op_fn(*args, **kwargs)
finally:
builder.clear_sharding()
def set_sharding(builder, op, sharding: SpatialSharding):
"""Uses CustomCall to annotate a value as sharded."""
return set_sharding_proto(builder, op, _sharding_to_proto(sharding))
def with_sharding(builder, sharding: SpatialSharding, op_fn, *args, **kwargs):
"""Builds op_fn(*args, **kwargs) with sharding annotation."""
return with_sharding_proto(builder, _sharding_to_proto(sharding), op_fn, *args, **kwargs)
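# Editor's note: an illustrative sketch (not part of the original module) of how
# get_compile_options is typically driven; the replica/partition counts are arbitrary
# example values chosen here.
def _example_compile_options():
    # Two replicas, a single partition, default device assignment.
    return get_compile_options(num_replicas=2, num_partitions=1)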
|
the-stack_0_12135 | # encoding: utf-8
import numpy as np
import _interp
import _remapping
import pyroms
def z2roms(varz, grdz, grd, Cpos='rho', irange=None, jrange=None, \
spval=1e37, flood=True, dmax=0, cdepth=0, kk=0, \
mode='linear'):
"""
var = z2roms(var, grdz, grd)
optional switch:
- Cpos='rho', 'u' or 'v' specify the C-grid position where
the variable rely
- irange specify grid sub-sample for i direction
- jrange specify grid sub-sample for j direction
- spval=1e37 define spval value
- dmax=0 if dmax>0, maximum horizontal
flooding distance
- cdepth=0 critical depth for flooding
if depth<cdepth => no flooding
- kk
- mode='linear' or 'spline' specify the type of interpolation
Interpolate the variable from z vertical grid grdz to ROMS grid grd
"""
varz = varz.copy()
assert len(varz.shape) == 3, 'var must be 3D'
    if mode=='linear':
        imode=0
    elif mode=='spline':
        imode=1
    else:
        # unknown mode: warn and fall back to linear, as the message promises
        import warnings
        warnings.warn('%s not supported, defaulting to linear' % mode)
        imode=0
    if Cpos == 'rho':
z = grdz.vgrid.z[:]
depth = grd.vgrid.z_r[0,:]
mask = grd.hgrid.mask_rho
    elif Cpos == 'u':
z = 0.5 * (grdz.vgrid.z[:,:,:-1] + grdz.vgrid.z[:,:,1:])
depth = 0.5 * (grd.vgrid.z_r[0,:,:,:-1] + grd.vgrid.z_r[0,:,:,1:])
mask = grd.hgrid.mask_u
    elif Cpos == 'v':
z = 0.5 * (grdz.vgrid.z[:,:-1,:] + grdz.vgrid.z[:,1:,:])
depth = 0.5 * (grd.vgrid.z_r[0,:,:-1,:] + grd.vgrid.z_r[0,:,1:,:])
mask = grd.hgrid.mask_v
    elif Cpos == 'w':
z = grdz.vgrid.z[:]
depth = grd.vgrid.z_w[0,:]
mask = grd.hgrid.mask_rho
else:
raise Warning('%s bad position. Use depth at Arakawa-C \
rho points instead.' % Cpos)
nlev, Mm, Lm = varz.shape
Nm = depth.shape[0]
if irange is None:
irange = (0,Lm)
else:
assert varz.shape[2] == irange[1]-irange[0], \
'var shape and irange must agree'
if jrange is None:
jrange = (0,Mm)
else:
assert varz.shape[1] == jrange[1]-jrange[0], \
'var shape and jrange must agree'
# flood varz if requested
if flood is True:
varz = pyroms.remapping.flood(varz, grdz, Cpos=Cpos, \
irange=irange, jrange=jrange, spval=spval, \
dmax=dmax, cdepth=cdepth, kk=kk)
varz = np.concatenate((varz[0:1,:,:], varz, varz[-1:,:,:]), 0)
z = np.concatenate((-9999*np.ones((1,z.shape[1], z.shape[2])), \
z, \
100*np.ones((1,z.shape[1], z.shape[2]))), 0)
var = np.ma.zeros((Nm, Mm, Lm))
for k in range(Nm):
var[k,:,:] = _interp.xhslice(varz, \
z[:,jrange[0]:jrange[1], irange[0]:irange[1]], \
depth[k,jrange[0]:jrange[1], irange[0]:irange[1]], \
mask[jrange[0]:jrange[1], irange[0]:irange[1]], \
imode, spval)
#mask
var = np.ma.masked_values(var, spval, rtol=1e-5)
#var[k,:,:] = np.ma.masked_where(mask == 0, var[k,:,:])
return var
|
the-stack_0_12136 | import logging
from typing import Dict, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
logger = logging.getLogger('jriver.mcws')
class MediaServer:
def __init__(self, ip: str, auth: Optional[Tuple[str, str]] = None, secure: bool = False):
self.__ip = ip
self.__auth = auth
self.__secure = secure
self.__base_url = f"http{'s' if secure else ''}://{ip}/MCWS/v1"
self.__token = None
def as_dict(self) -> dict:
return {self.__ip: (self.__auth, self.__secure)}
def __repr__(self):
suffix = f" [{self.__auth[0]}]" if self.__auth else ' [Unauthenticated]'
return f"{self.__ip}{suffix}"
def authenticate(self) -> bool:
self.__token = None
url = f"{self.__base_url}/Authenticate"
r = requests.get(url, auth=self.__auth, timeout=(1, 5))
if r.status_code == 200:
response = ET.fromstring(r.content)
if response:
r_status = response.attrib.get('Status', None)
if r_status == 'OK':
for item in response:
if item.attrib['Name'] == 'Token':
self.__token = item.text
if self.connected:
return True
else:
raise MCWSError('Authentication failure', r.url, r.status_code, r.text)
@property
def connected(self) -> bool:
return self.__token is not None
def get_zones(self) -> Dict[str, str]:
self.__auth_if_required()
r = requests.get(f"{self.__base_url}/Playback/Zones", params={'Token': self.__token}, timeout=(1, 5))
if r.status_code == 200:
response = ET.fromstring(r.content)
if response:
r_status = response.attrib.get('Status', None)
if r_status == 'OK':
zones = {}
for child in response:
if child.tag == 'Item' and 'Name' in child.attrib:
attrib = child.attrib['Name']
if attrib.startswith('ZoneName'):
item_idx = attrib[8:]
if item_idx in zones:
zones[item_idx]['name'] = child.text
else:
zones[item_idx] = {'name': child.text}
elif attrib.startswith('ZoneID'):
item_idx = attrib[6:]
if item_idx in zones:
zones[item_idx]['id'] = child.text
else:
zones[item_idx] = {'id': child.text}
return {v['name']: v['id'] for v in zones.values()}
raise MCWSError('No zones loaded', r.url, r.status_code, r.text)
def __auth_if_required(self):
if not self.connected:
self.authenticate()
def get_dsp(self, zone_id: str) -> Optional[str]:
self.__auth_if_required()
r = requests.get(f"{self.__base_url}/Playback/SaveDSPPreset",
params={'Token': self.__token, 'Zone': zone_id, 'ZoneType': 'ID'},
timeout=(1, 5))
if r.status_code == 200:
response = ET.fromstring(r.text)
if response:
if response.tag == 'DSP':
return r.text
elif response.tag == 'Response':
r_status = response.attrib.get('Status', None)
if r_status == 'OK':
for child in response:
if child.tag == 'Item' and 'Name' in child.attrib and child.attrib['Name'] == 'Preset':
return child.text
raise MCWSError('No DSP loaded', r.url, r.status_code, r.text)
def set_dsp(self, zone_id: str, dsp: str) -> bool:
self.__auth_if_required()
dsp = dsp.replace('\n', '\r\n')
if not dsp.endswith('\r\n'):
dsp = dsp + '\r\n'
r = requests.post(f"{self.__base_url}/Playback/LoadDSPPreset",
params={'Token': self.__token, 'Zone': zone_id, 'ZoneType': 'ID'},
files={'Name': (None, dsp)},
timeout=(1, 5))
if r.status_code == 200:
logger.debug(f"LoadDSPPreset/{zone_id} success")
loaded_dsp = self.get_dsp(zone_id)
if self.__compare(loaded_dsp, dsp):
return True
else:
raise DSPMismatchError(zone_id, dsp, loaded_dsp)
else:
raise MCWSError('DSP not set', r.url, r.status_code, r.text)
@staticmethod
def __compare(a: str, b: str):
a_xml = ET.canonicalize(a)
b_xml = ET.canonicalize(b)
return a_xml == b_xml
class DSPMismatchError(Exception):
def __init__(self, zone_id: str, expected: str, actual):
super().__init__(f"Mismatch in DSP loaded to {zone_id}")
self.zone_id = zone_id
self.expected = expected
self.actual = actual
class MCWSError(Exception):
def __init__(self, msg: str, url: str, status_code: int, resp: Optional[str] = None):
super().__init__(msg)
self.msg = msg
self.url = url
self.status_code = status_code
self.resp = resp
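# Editor's note: a minimal usage sketch (illustrative addition, not part of the
# original module). The address, credentials and the choice of zone below are
# placeholder assumptions.
def _example_read_dsp():
    ms = MediaServer('127.0.0.1:52199', auth=('user', 'pass'), secure=False)
    if ms.authenticate():
        zones = ms.get_zones()            # {zone_name: zone_id}
        zone_id = next(iter(zones.values()))
        return ms.get_dsp(zone_id)        # XML text of the current DSP preset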
|
the-stack_0_12138 | """Extract the most recurrent tokens of the template text"""
import json
import more_itertools
import mwxml
import datetime
from typing import Iterable, Iterator, Mapping, Optional
from backports.datetime_fromisoformat import MonkeyPatch
# nltk
from .. import extractors, user_warnings_en, user_warnings_it, user_warnings_es, user_warnings_ca, utils
import math
import random
# Polyfiller for retrocompatibiliy with Python3.5
MonkeyPatch.patch_fromisoformat()
# MAX REVISIONS
MAX_REVISION_CACHE = 100
# REVISION STORAGE
REVISION_STORAGE = list()
# time interval in seconds
time_interval_in_seconds = {
'1 day': 86400,
'1 week': 604800
}
# user warnings templates
user_warnings_templates = set(
user_warnings_en.block_templates_indefinitely_blocked_templates + \
user_warnings_en.block_templates + \
user_warnings_en.arbitration_enforcement_templates_1RR_related_templates + \
user_warnings_en.arbitration_enforcement_templates_pages_with_discretionary_sanctions_editnotice + \
user_warnings_en.arbitration_enforcement_templates + \
user_warnings_en.csd_warning_templates + \
user_warnings_en.community_authorised_general_sanctions_templates + \
user_warnings_en.community_authorised_general_sanctions_templates_standardized + \
user_warnings_en.community_authorised_general_sanctions_templates_obsolete + \
user_warnings_en.non_english_welcome + \
user_warnings_en.non_english + \
user_warnings_en.test_templates + \
user_warnings_en.standardized_templates + \
user_warnings_en.user_warnings_templates + \
user_warnings_it.avviso_utenti_anonimi + \
user_warnings_it.benvenuto + \
user_warnings_it.benvenuto_progetti + \
user_warnings_it.avviso_copyright + \
user_warnings_it.avviso_invito_progetti + \
user_warnings_it.vandalismo + \
user_warnings_es.bienvenida + \
user_warnings_es.permission_grant_notification_templates + \
user_warnings_es.user_warnings + \
user_warnings_ca.benvinguda + \
user_warnings_ca.Avisos_de_discussio + \
user_warnings_ca.plantilles_d_avisos_d_edicio_generics + \
user_warnings_ca.plantilles_d_avisos_d_edicio + \
user_warnings_ca.plantilles_d_avisos_d_idioma + \
user_warnings_ca.plantilles_d_avisos
)
# REVISION AND PAGE CLASSES
class Revision:
"""Class which represent a revision of the template page"""
def __init__(self, id: str, user: mwxml.Revision.User, timestamp: str, template_info: extractors.user_warnings_template_words.UserWarningTf):
self.id = id # revision id
self.user = user # revision user
self.timestamp = timestamp # revision timestamp
self.template_info = template_info # template information about the words stemmed and without stopwords and occurences
self.words_to_search = list() # list of the k words which characterizes the the template the most (k = template_info.total_number_words / 2)
    def to_dict(self) -> Mapping:
"""Converts the object instance into a dictionary"""
obj = dict()
obj['id'] = self.id
user_id = ''
user_name = ''
if self.user:
user_id = self.user.id
user_name = self.user.text
obj['user_id'] = user_id
obj['user_name'] = user_name
obj['timestamp'] = self.timestamp
obj['template_info'] = self.template_info.to_dict()
obj['words_to_search'] = self.words_to_search
return obj
def __repr__(self):
return 'date: {}'.format(self.timestamp)
def __lt__(self, other):
return datetime.datetime.fromisoformat(self.timestamp.replace('Z', '+00:00')) < datetime.datetime.fromisoformat(other.timestamp.replace('Z', '+00:00'))
class Page:
"""Class which represent a page containing a list of revisions"""
def __init__(self, id: str, namespace: str, title: str, revisions: Iterator[Revision], tfidf: Mapping, idf: Mapping, occurences_in_corpus: Mapping):
self.id = id # page id
self.namespace = namespace # page namespace
self.title = title # page title
self.revisions = revisions # list of revisions
self.tfidf=tfidf # tf-idf metrics
self.occurences_in_corpus = occurences_in_corpus # stemmed word occurences in corups (1 if the word appear in a corpus 0 othewise)
self.idf = idf # idf metric in corpus
def to_dict(self) -> Mapping:
"""Converts the object instance into a dictionary"""
obj = dict()
obj['id'] = self.id
obj['namespace'] = self.namespace
obj['title'] = self.title
obj['revisions'] = list()
for rev in self.revisions:
obj['revisions'].append(rev.to_dict())
obj['tf-idf'] = self.tfidf
obj['occurences_in_corupus'] = self.occurences_in_corpus
obj['idf'] = self.idf
return obj
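# Editor's note: an illustrative helper (not part of the original module) showing, on
# a toy corpus of token-count dicts, the tf-idf weighting that extract_pages applies
# further down (tf = count / total words, idf = log10(N_docs / doc_frequency)).
def _tfidf_example():
    corpus = [{'block': 2, 'user': 1}, {'welcome': 3, 'user': 1}]
    n_docs = len(corpus)
    df = dict()  # number of documents each word appears in
    for doc in corpus:
        for word in doc:
            df[word] = df.get(word, 0) + 1
    idf = {word: math.log(n_docs / df[word], 10) for word in df}
    first_doc = corpus[0]
    total_words = sum(first_doc.values())
    return {word: (first_doc[word] / total_words) * idf[word] for word in first_doc}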
def extract_revisions(
mw_page: mwxml.Page,
stats: Mapping,
only_last_revision: bool,
language: str,
stemmer: bool) -> Iterator[Revision]:
"""Extracts the history of a user_warning_template within a template page -> most important keywords."""
revisions = more_itertools.peekable(mw_page)
# Newest revisions, useful only if the only_last_revision flag is set equal to true
newest_revision = None
for mw_revision in revisions:
utils.dot()
# check if it's last revision
is_last_revision = not utils.has_next(revisions)
# remove html comments
text = utils.remove_comments(mw_revision.text or '')
# extract the template text and other info
template_info = extractors.user_warnings_template_words.userwarnings_words_extractor(text, language, stemmer)
# Build the revision
rev = Revision(
id=mw_revision.id,
user=mw_revision.user,
timestamp=mw_revision.timestamp.to_json(),
template_info=template_info,
)
        # keep track of the newest revision seen so far
if not newest_revision:
newest_revision = rev
else:
newest_date = datetime.datetime.fromisoformat(newest_revision.timestamp.replace('Z', '+00:00'))
current_date = datetime.datetime.fromisoformat(mw_revision.timestamp.to_json().replace('Z', '+00:00'))
# change the revision if the current one is newer
if newest_date < current_date:
newest_revision = rev
# Update stats
stats['performance']['revisions_analyzed'] += 1
# requested only the last revision
if only_last_revision:
if is_last_revision:
yield newest_revision
else:
yield rev
def extract_pages(
dump: Iterable[mwxml.Page],
stats: Mapping,
only_last_revision: bool,
set_interval: Optional[str],
esclude_template_repetition: bool,
language: str,
stemmer: bool,
minimum_word_length: int) -> Iterator[Page]:
"""Extract the templates from an user page."""
counter = 1
# Loop on all the pages in the dump, one at a time
for mw_page in dump:
utils.log("Processing", mw_page.title)
# Skip non-template, according to https://en.wikipedia.org/wiki/Wikipedia:Namespace
if mw_page.namespace != 10:
utils.log('Skipped (namespace != 10)')
continue
# flag which tells if the revision can be stored
store_flag = False
        # these revisions may replace / be stored in the revision storage cache
if not mw_page.title.lower() in user_warnings_templates:
store_flag = True
else:
counter += 1
revisions_generator = extract_revisions(
mw_page,
stats=stats,
only_last_revision=only_last_revision,
language=language,
stemmer=stemmer
)
revisions_list = list(revisions_generator)
# sort the revision list by date
revisions_list.sort()
# filtered revision list
filtered_revisions_list = list()
# reference revisions
reference_rev = None
# take the first reference revision and insert it
if revisions_list:
reference_rev = revisions_list[0]
filtered_revisions_list.append(reference_rev)
# partition time by time interval specified by set_interval
if set_interval or esclude_template_repetition:
for elem in revisions_list:
# ge the last inserted and current time interval
last_inserted_time = datetime.datetime.fromisoformat(reference_rev.timestamp.replace('Z', '+00:00'))
current_time = datetime.datetime.fromisoformat(elem.timestamp.replace('Z', '+00:00'))
condition = True
if set_interval:
# condition for the time interval
condition = condition and (current_time - last_inserted_time).total_seconds() < time_interval_in_seconds[set_interval]
if esclude_template_repetition:
# condition for the different regexp
condition = condition and reference_rev.template_info.template_text != elem.template_info.template_text
if condition:
filtered_revisions_list[-1] = elem # substitute because included in the time interval (partitioned by the time interval)
else:
# if there is the different regexp selected then inserted only if the previous one has different regexp than the current one
if not (esclude_template_repetition and reference_rev.template_info.template_text == elem.template_info.template_text):
filtered_revisions_list.append(elem)
reference_rev = elem
else:
# no tag selected
filtered_revisions_list = revisions_list
# filter out the empty revisions
filtered_revisions_list = [ rev for rev in filtered_revisions_list if rev.template_info.total_number_words != 0 ]
if store_flag:
# REVISION STORAGE update
rev_storage_size = len(REVISION_STORAGE)
filtered_rev_size = len(filtered_revisions_list)
# store the revision in this cache
if (rev_storage_size + filtered_rev_size) <= MAX_REVISION_CACHE:
# fill the revision storage
REVISION_STORAGE.extend(filtered_revisions_list)
elif rev_storage_size <= MAX_REVISION_CACHE:
# replace some revisions
min_length = min(rev_storage_size, filtered_rev_size)
for i in range(random.randrange(min_length)):
REVISION_STORAGE[i] = filtered_revisions_list[i]
else:
# fill and replace some revisions
filtered_rev_counter = 0
while(rev_storage_size < MAX_REVISION_CACHE):
REVISION_STORAGE.append(filtered_revisions_list[filtered_rev_counter])
filtered_rev_counter += 1
rev_storage_size += 1
for index in range(filtered_rev_counter, filtered_rev_size):
rev_storage_index = random.randrange(rev_storage_size)
REVISION_STORAGE[rev_storage_index] = filtered_revisions_list[index]
else:
# extended corpus
extended_corpus = list(filtered_revisions_list)
rev_range_size = len(REVISION_STORAGE)
# extended corpus
for index in range(len(filtered_revisions_list)):
extended_corpus.append(REVISION_STORAGE[random.randrange(rev_range_size)])
# element occur in document
is_in_document_dict = dict()
corpus_size = len(extended_corpus)
# word list
words_list = set()
# retrieve only the interesting words
for revision in filtered_revisions_list:
for word in revision.template_info.inf_retrieval:
words_list.add(word)
# is in document calculus
for revision in extended_corpus:
for word in revision.template_info.inf_retrieval:
# only in the interesting words
if word in words_list:
if not word in is_in_document_dict:
is_in_document_dict[word] = 1
else:
is_in_document_dict[word] += 1
# idf word calculus
idf_dict = dict() # idf per corpus
for word in is_in_document_dict:
idf_dict[word] = math.log(corpus_size / is_in_document_dict[word], 10)
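            # Worked example (illustrative numbers only): with an extended corpus of
            # 100 revisions in which a word occurs in 10 documents, idf = log10(100/10) = 1.0,
            # while a word occurring in all 100 documents gets idf = log10(100/100) = 0
            # and therefore contributes nothing to the tf-idf scores computed below.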
# tf-idf calculus
            # TODO: possibly invert the loop nesting or something similar; figure out how later
tfidf = dict() # the corpus is constant, so it will be indicized by word and document
for word in is_in_document_dict: # for every word
tfidf[word] = dict()
for doc_index in range(len(filtered_revisions_list)): # for all document
rev = filtered_revisions_list[doc_index]
# calculate tf for word in document
if word in rev.template_info.inf_retrieval:
tf = rev.template_info.inf_retrieval[word] / rev.template_info.total_number_words
else:
tf = 0
# multiply it by the idf of that word
tfidf[word][doc_index] = tf * idf_dict[word]
# assign the words to keep
rev.words_to_search.append((word, tfidf[word][doc_index]))
# take the words needed
for rev in filtered_revisions_list:
k = int(rev.template_info.total_number_words / 2)
# words to search
rev.words_to_search.sort(key = lambda a: a[1], reverse=True)
                # if a minimum word length is set, keep only words longer than it:
if minimum_word_length:
index = 0
for word,_ in rev.words_to_search:
                        # keep the word only if it exceeds the minimum length
if len(word) > minimum_word_length:
rev.words_to_search[index] = (word,_)
index += 1
rev.words_to_search = rev.words_to_search[:index]
# taking the k values with the highest tf-idf metric value associated
rev.words_to_search = [ el[0] for el in rev.words_to_search[:k] ]
# stats update
if not language in stats['user_warnings_templates']:
stats['user_warnings_templates'][language] = dict()
stats['user_warnings_templates'][language][mw_page.title] = dict()
stats['user_warnings_templates'][language][mw_page.title]['word_occurences'] = is_in_document_dict
stats['user_warnings_templates'][language][mw_page.title]['tf-idf'] = tfidf
page = Page(
id=mw_page.id,
namespace=mw_page.namespace,
title=mw_page.title,
revisions=filtered_revisions_list,
tfidf=tfidf,
idf=idf_dict,
occurences_in_corpus=is_in_document_dict
)
yield page
def configure_subparsers(subparsers):
"""Configure a new subparser for the known languages."""
parser = subparsers.add_parser(
'extract-user-warnings-templates-tokens',
help='Extract the tokens of the templates of the users warnings',
)
parser.add_argument(
'--only-last-revision',
action='store_true',
help='Consider only the last revision for each page.',
)
parser.add_argument(
'--set-interval',
choices={None, '1 day', '1 week'},
required=False,
default=None,
        help='Time interval at the end of which to return the revision',
)
parser.add_argument(
'--esclude-template-repetition',
action='store_true',
help='It does not return a revision if the same template was previously declared',
)
parser.add_argument(
'--language',
choices={'italian', 'catalan', 'spanish', 'english'},
required=True,
help='Language of the analyzed dump',
)
parser.add_argument(
'--rev-cache',
        action='store',
        type=int,
        required=False,
        help='Maximum number of revisions kept in the revision cache',
)
parser.add_argument(
'--stemmer',
action='store_true',
required=False,
help='Retrieve stemmed words',
)
parser.add_argument(
'--minimum-word-length',
action='store',
type=int,
default=0,
required=False,
        help='Minimum word length to retrieve',
)
parser.set_defaults(func=main)
def main(
dump: Iterable[mwxml.Page],
features_output_h,
stats_output_h,
args) -> None:
"""Main function that parses the arguments and writes the output."""
stats = {
'performance': {
'start_time': None,
'end_time': None,
'revisions_analyzed': 0,
'pages_analyzed': 0,
},
'user_warnings_templates': dict() # maybe the top 5 or all the best templates
}
if args.rev_cache:
try:
global MAX_REVISION_CACHE
x = int(args.rev_cache)
if x > 0:
MAX_REVISION_CACHE = x
except ValueError:
pass
pages_generator = extract_pages(
dump,
stats=stats,
only_last_revision=args.only_last_revision,
set_interval=args.set_interval,
esclude_template_repetition=args.esclude_template_repetition,
language=args.language,
stemmer=args.stemmer,
minimum_word_length=args.minimum_word_length
)
stats['performance']['start_time'] = datetime.datetime.utcnow()
for obj in pages_generator:
features_output_h.write(json.dumps(obj.to_dict()))
features_output_h.write("\n")
stats['performance']['end_time'] = datetime.datetime.utcnow()
stats_output_h.write(json.dumps(stats, indent=4, default=str)) |
the-stack_0_12139 | class CompilationEngine:
"""
compiles a jack source file from a jack tokenizer into xml form in output_file
"""
TERMINAL_TOKEN_TYPES = ["STRING_CONST", "INT_CONST", "IDENTIFIER", "SYMBOL"]
TERMINAL_KEYWORDS = ["boolean", "class", "void", "int"]
CLASS_VAR_DEC_TOKENS = ["static", "field"]
SUBROUTINE_TOKENS = ["function", "method", "constructor"]
STATEMENT_TOKENS = ['do', 'let', 'while', 'return', 'if']
STARTING_TOKENS = {
'var_dec': ['var'],
'parameter_list': ['('],
'subroutine_body': ['{'],
'expression_list': ['('],
'expression': ['=', '[', '(']
}
TERMINATING_TOKENS = {
'class': ['}'],
'class_var_dec': [';'],
'subroutine': ['}'],
'parameter_list': [')'],
'expression_list': [')'],
'statements': ['}'],
'do': [';'],
'let': [';'],
'while': ['}'],
'if': ['}'],
'var_dec': [';'],
'return': [';'],
'expression': [';', ')', ']', ',']
}
OPERATORS = [
'+',
'-',
'*',
'/',
'&',
'|',
'<',
'>',
'='
]
UNARY_OPERATORS = ['-', '~']
def __init__(self, tokenizer, output_file):
self.tokenizer = tokenizer
self.output_file = output_file
def compile_class(self):
"""
everything needed to compile a class, the basic unit of compilation
"""
self._write_current_outer_tag(body="class")
while self.tokenizer.has_more_tokens:
self.tokenizer.advance()
if self._terminal_token_type() or self._terminal_keyword():
self._write_current_terminal_token()
elif self.tokenizer.current_token in self.CLASS_VAR_DEC_TOKENS:
self.compile_class_var_dec()
elif self.tokenizer.current_token in self.SUBROUTINE_TOKENS:
self.compile_subroutine()
self._write_current_outer_tag(body="/class")
def compile_class_var_dec(self):
"""
example: field int x;
"""
self._write_current_outer_tag(body="classVarDec")
self._write_current_terminal_token()
while self._not_terminal_token_for('class_var_dec'):
self.tokenizer.advance()
self._write_current_terminal_token()
self._write_current_outer_tag(body="/classVarDec")
def compile_subroutine(self):
"""
        example: method void dispose() { ...
"""
self._write_current_outer_tag(body="subroutineDec")
self._write_current_terminal_token()
while self._not_terminal_token_for('subroutine'):
self.tokenizer.advance()
if self._starting_token_for('parameter_list'):
self.compile_parameter_list()
elif self._starting_token_for('subroutine_body'):
self.compile_subroutine_body()
else:
self._write_current_terminal_token()
self._write_current_outer_tag(body="/subroutineDec")
def compile_parameter_list(self):
"""
example: dispose(int a, int b)
"""
# write starting (
self._write_current_terminal_token()
self._write_current_outer_tag(body="parameterList")
while self._not_terminal_token_for(position='next', keyword_token='parameter_list'):
self.tokenizer.advance()
self._write_current_terminal_token()
self._write_current_outer_tag(body="/parameterList")
# advance to closing )
self.tokenizer.advance()
self._write_current_terminal_token()
# '{' varDec* statements '}'
def compile_subroutine_body(self):
"""
example: { do square.dispose() };
"""
self._write_current_outer_tag(body="subroutineBody")
# write opening {
self._write_current_terminal_token()
while self._not_terminal_token_for('subroutine'):
self.tokenizer.advance()
if self._starting_token_for('var_dec'):
self.compile_var_dec()
elif self._statement_token():
self.compile_statements()
else:
self._write_current_terminal_token()
# write closing }
self._write_current_terminal_token()
self._write_current_outer_tag(body="/subroutineBody")
# 'var' type varName (',' varName)* ';'
def compile_var_dec(self):
"""
example: var int a;
"""
self._write_current_outer_tag(body="varDec")
self._write_current_terminal_token()
while self._not_terminal_token_for('var_dec'):
self.tokenizer.advance()
self._write_current_terminal_token()
self._write_current_outer_tag(body="/varDec")
def compile_statements(self):
"""
call correct statement
"""
self._write_current_outer_tag(body="statements")
while self._not_terminal_token_for('subroutine'):
if self.tokenizer.current_token == "if":
self.compile_if()
elif self.tokenizer.current_token == "do":
self.compile_do()
elif self.tokenizer.current_token == "let":
self.compile_let()
elif self.tokenizer.current_token == "while":
self.compile_while()
elif self.tokenizer.current_token == "return":
self.compile_return()
self.tokenizer.advance()
self._write_current_outer_tag(body="/statements")
def compile_statement_body(self, not_terminate_func, condition_func, do_something_special_func):
"""
        helper to DRY up statement bodies: advance until the statement's terminating
        token, calling do_something_special_func whenever condition_func is true and
        writing the current terminal token otherwise
"""
while not_terminate_func():
self.tokenizer.advance()
if condition_func():
do_something_special_func()
else:
self._write_current_terminal_token()
def compile_do(self):
"""
example: do square.dispose();
"""
self._write_current_outer_tag(body="doStatement")
self._write_current_terminal_token()
# experimental
def do_terminator_func():
return self._not_terminal_token_for('do')
def do_condition_func():
return self._starting_token_for('expression_list')
def do_do_something_func():
return self.compile_expression_list()
self.compile_statement_body(do_terminator_func, do_condition_func, do_do_something_func)
self._write_current_outer_tag(body="/doStatement")
# LEAVING UNDRY FOR NOW TO SEE WHAT NEXT PROJECT BRINGS
# 'let' varName ('[' expression ']')? '=' expression ';'
def compile_let(self):
"""
example: let direction = 0;
"""
self._write_current_outer_tag(body="letStatement")
# write let keyword
self._write_current_terminal_token()
while self._not_terminal_token_for('let'):
self.tokenizer.advance()
if self._starting_token_for('expression'):
self.compile_expression()
else:
self._write_current_terminal_token()
self._write_current_outer_tag(body="/letStatement")
# 'while' '(' expression ')' '{' statements '}'
def compile_while(self):
"""
example: while (x > 0) { ... }
"""
self._write_current_outer_tag(body="whileStatement")
# write keyword while
self._write_current_terminal_token()
# advance to expression start (
self.tokenizer.advance()
# compile expression in ()
self.compile_expression()
while self._not_terminal_token_for('while'):
self.tokenizer.advance()
if self._statement_token():
self.compile_statements()
else:
self._write_current_terminal_token()
# write terminal token
self._write_current_terminal_token()
self._write_current_outer_tag(body="/whileStatement")
def compile_if(self):
"""
example: if (True) { ... } else { ... }
"""
self._write_current_outer_tag(body="ifStatement")
# write keyword if
self._write_current_terminal_token()
# advance to expression start
self.tokenizer.advance()
# compile expression in ()
self.compile_expression()
def not_terminate_func():
return self._not_terminal_token_for('if')
def condition_func():
return self._statement_token()
def do_something_special_func():
return self.compile_statements()
self.compile_statement_body(not_terminate_func, condition_func, do_something_special_func)
# compile else
if self.tokenizer.next_token == "else":
# write closing {
self._write_current_terminal_token()
# past closing {
self.tokenizer.advance()
# write else
self._write_current_terminal_token()
# same as above
self.compile_statement_body(
not_terminate_func,
condition_func,
do_something_special_func
)
# write terminal token
self._write_current_terminal_token()
self._write_current_outer_tag(body="/ifStatement")
# term (op term)*
def compile_expression(self):
"""
        used in many places, e.g., x = 4
"""
self._write_current_terminal_token()
self._write_current_outer_tag(body="expression")
# check starting for unary negative
if self._starting_token_for('expression') and self._next_token_is_negative_unary_operator():
unary_negative_token = True
else:
unary_negative_token = False
self.tokenizer.advance()
while self._not_terminal_token_for('expression'):
if self._operator_token() and not unary_negative_token:
self._write_current_terminal_token()
self.tokenizer.advance()
else:
self.compile_term()
self._write_current_outer_tag(body="/expression")
self._write_current_terminal_token()
def compile_expression_in_expression_list(self):
"""
        separated out of compile_expression because of edge cases relative to a normal expression
example: (x, y, x + 5)
"""
self._write_current_outer_tag(body="expression")
# go till , or (
while self._not_terminal_token_for('expression'):
if self._operator_token():
self._write_current_terminal_token()
self.tokenizer.advance()
else:
self.compile_term()
# term takes care of advancing..
self._write_current_outer_tag(body="/expression")
# (expression (',' expression)* )?
def compile_expression_list(self):
"""
        separated out of compile_expression because of edge cases relative to a normal expression
example: (x, y, x + 5)
"""
# write (
self._write_current_terminal_token()
self._write_current_outer_tag(body="expressionList")
# skip initial (
self.tokenizer.advance()
while self._not_terminal_token_for('expression_list'):
self.compile_expression_in_expression_list()
# current token could be , or ) to end expression list
if self._another_expression_coming():
self._write_current_terminal_token()
self.tokenizer.advance()
self._write_current_outer_tag(body="/expressionList")
# write )
self._write_current_terminal_token()
# integerConstant | stringConstant | keywordConstant | varName |
# varName '[' expression ']' | subroutineCall | '(' expression ')' | unaryOp term
def compile_term(self):
"""
        most complicated and difficult part of the compiler
TODO: try to simplify
"""
self._write_current_outer_tag(body="term")
while self._not_terminal_condition_for_term():
if self.tokenizer.part_of_subroutine_call():
self.compile_expression_list()
elif self._starting_token_for('expression'):
self.compile_expression()
elif self.tokenizer.current_token in self.UNARY_OPERATORS:
self._write_current_terminal_token()
if self._starting_token_for(keyword_token='expression', position='next'):
self.tokenizer.advance()
self.compile_term()
break
else:
self.tokenizer.advance()
# write inner term
self._write_current_outer_tag(body="term")
self._write_current_terminal_token()
self._write_current_outer_tag(body="/term")
else:
self._write_current_terminal_token()
                # e.g. the `i *` case: the next token is an operator that continues the enclosing expression
if self._next_token_is_operation_not_in_expression():
self.tokenizer.advance()
break
self.tokenizer.advance()
self._write_current_outer_tag(body="/term")
def compile_return(self):
"""
example: return x; or return;
"""
self._write_current_outer_tag(body="returnStatement")
if self._not_terminal_token_for(keyword_token='return', position='next'):
self.compile_expression()
else: # write return and ; for void
self._write_current_terminal_token()
self.tokenizer.advance()
self._write_current_terminal_token()
self._write_current_outer_tag(body="/returnStatement")
def _write_current_outer_tag(self, body):
self.output_file.write("<{}>\n".format(body))
def _write_current_terminal_token(self):
# conform to expected xml
if self.tokenizer.current_token_type() == "STRING_CONST":
tag_name = "stringConstant"
elif self.tokenizer.current_token_type() == "INT_CONST":
tag_name = "integerConstant"
else:
tag_name = self.tokenizer.current_token_type().lower()
if self.tokenizer.current_token_type() == "STRING_CONST":
value = self.tokenizer.current_token.replace("\"", "")
else:
value = self.tokenizer.current_token
self.output_file.write(
"<{}> {} </{}>\n".format(
tag_name,
value,
tag_name
)
)
def _terminal_token_type(self):
return self.tokenizer.current_token_type() in self.TERMINAL_TOKEN_TYPES
def _terminal_keyword(self):
return self.tokenizer.current_token in self.TERMINAL_KEYWORDS
def _not_terminal_token_for(self, keyword_token, position='current'):
if position == 'current':
return not self.tokenizer.current_token in self.TERMINATING_TOKENS[keyword_token]
elif position == 'next':
return not self.tokenizer.next_token in self.TERMINATING_TOKENS[keyword_token]
def _starting_token_for(self, keyword_token, position='current'):
if position == 'current':
return self.tokenizer.current_token in self.STARTING_TOKENS[keyword_token]
elif position == 'next':
return self.tokenizer.next_token in self.STARTING_TOKENS[keyword_token]
def _statement_token(self):
return self.tokenizer.current_token in self.STATEMENT_TOKENS
def _operator_token(self, position='current'):
if position == 'current':
return self.tokenizer.current_token in self.OPERATORS
elif position == 'next':
return self.tokenizer.next_token in self.OPERATORS
def _next_token_is_negative_unary_operator(self):
return self.tokenizer.next_token == "-"
def _another_expression_coming(self):
return self.tokenizer.current_token == ","
def _not_terminal_condition_for_term(self):
# expression happens to cover all bases
return self._not_terminal_token_for('expression')
def _next_token_is_operation_not_in_expression(self):
return self._operator_token(position='next') and not self._starting_token_for('expression')
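
# Usage sketch (not part of the original file; illustrative only). The engine expects
# a tokenizer exposing `has_more_tokens`, `advance()`, `current_token`, `next_token`,
# `current_token_type()` and `part_of_subroutine_call()`, plus a writable output file.
# The `JackTokenizer` import path below is an assumption about the surrounding project.
def _example_compile(jack_path, xml_path):
    from tokenizer import JackTokenizer  # hypothetical module/class name
    with open(xml_path, "w") as output_file:
        engine = CompilationEngine(JackTokenizer(jack_path), output_file)
        engine.compile_class()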
|
the-stack_0_12142 | import bisect
import os
import json
from .utils import BlacklistItemsWrapper
from collections import OrderedDict
from utils import AutoDatabase, AutoLexicalizer
from .utils import DialogDataset, DialogDatasetItem, split_name
DATASETS_PATH = os.path.join(os.path.expanduser(os.environ.get('DATASETS_PATH', '~/datasets')), 'soloist')
def build_blacklist(items, domains=None):
for i, (dialogue, items) in enumerate(items):
if domains is not None and set(dialogue['domains']).difference(domains):
yield i
elif items[-1]['speaker'] != 'system':
yield i
def load_dataset(name, use_goal=False, context_window_size=15, domains=None, **kwargs) -> DialogDataset:
name, split = split_name(name)
path = os.path.join(DATASETS_PATH, name)
with open(os.path.join(path, f'{split}.json'), 'r') as f:
data = json.load(f, object_pairs_hook=OrderedDict)
dialogues = data['dialogues']
items = DialogueItems(dialogues)
items = BlacklistItemsWrapper(items, list(build_blacklist(items, domains)))
def transform(x):
dialogue, items = x
context = [s['text'] for s in items[:-1]]
if context_window_size is not None and context_window_size > 0:
context = context[-context_window_size:]
belief = items[-1]['belief']
database = items[-1]['database']
item = DialogDatasetItem(context, raw_belief=belief, database=database,
response=items[-1]['delexicalised_text'], raw_response=items[-1]['text'])
if use_goal:
setattr(item, 'goal', dialogue['goal'])
# MultiWOZ evaluation uses booked domains property
if 'booked_domains' in items[-1]:
setattr(item, 'booked_domains', items[-1]['booked_domains'])
setattr(item, 'dialogue_act', items[-1]['dialogue_act'])
setattr(item, 'active_domain', items[-1]['active_domain'])
return item
dataset = DialogDataset(items, transform=transform, domains=data['domains'])
if os.path.exists(os.path.join(path, 'database.zip')):
dataset.database = AutoDatabase.load(path)
if os.path.exists(os.path.join(path, 'lexicalizer.zip')):
dataset.lexicalizer = AutoLexicalizer.load(path)
return dataset
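
# Usage sketch (illustrative only): the dataset name/split format and the presence of
# database.zip / lexicalizer.zip under DATASETS_PATH are assumptions about how the data
# was prepared, not guarantees made by this module.
def _example_load():
    dataset = load_dataset('multiwoz-2.1-train', context_window_size=15)
    print(getattr(dataset, 'domains', None),
          getattr(dataset, 'database', None) is not None,
          getattr(dataset, 'lexicalizer', None) is not None)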
class DialogueItems:
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
r.append(e + s)
s += e
return r
def __init__(self, dialogues):
lengths = [len(x['items']) for x in dialogues]
self.cumulative_sizes = DialogueItems.cumsum(lengths)
self.dialogues = dialogues
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dialogue_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dialogue_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dialogue_idx - 1]
return self.dialogues[dialogue_idx], self.dialogues[dialogue_idx]['items'][:sample_idx + 1]
def __len__(self):
if not self.cumulative_sizes:
return 0
return self.cumulative_sizes[-1]
|
the-stack_0_12143 | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import sys, random, os
import bpy, bpy_extras
"""
Some utility functions for interacting with Blender
"""
def extract_args(input_argv=None):
"""
Pull out command-line arguments after "--". Blender ignores command-line flags
after --, so this lets us forward command line arguments from the blender
invocation to our own script.
"""
if input_argv is None:
input_argv = sys.argv
output_argv = []
if '--' in input_argv:
idx = input_argv.index('--')
output_argv = input_argv[(idx + 1):]
return output_argv
def parse_args(parser, argv=None):
return parser.parse_args(extract_args(argv))
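# Example: invoking Blender as
#   blender --background --python render_images.py -- --num_images 10
# (script name illustrative) leaves ['--num_images', '10'] after the '--' marker in
# sys.argv, which is exactly what extract_args() returns and parse_args() consumes.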
# I wonder if there's a better way to do this?
def delete_object(obj):
""" Delete a specified blender object """
for o in bpy.data.objects:
o.select_set(False)
obj.select_set(True)
bpy.ops.object.delete()
def get_camera_coords(cam, pos):
"""
For a specified point, get both the 3D coordinates and 2D pixel-space
coordinates of the point from the perspective of the camera.
Inputs:
- cam: Camera object
- pos: Vector giving 3D world-space position
Returns a tuple of:
- (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
in the range [-1, 1]
"""
scene = bpy.context.scene
x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
scale = scene.render.resolution_percentage / 100.0
w = int(scale * scene.render.resolution_x)
h = int(scale * scene.render.resolution_y)
px = int(round(x * w))
py = int(round(h - y * h))
return (px, py, z)
def set_layer(obj, layer_idx):
""" Move an object to a particular layer """
# Set the target layer to True first because an object must always be on
# at least one layer.
obj.layers[layer_idx] = True
for i in range(len(obj.layers)):
obj.layers[i] = (i == layer_idx)
def add_object(object_dir, name, scale, loc, theta=0):
"""
Load an object from a file. We assume that in the directory object_dir, there
is a file named "$name.blend" which contains a single object named "$name"
that has unit size and is centered at the origin.
- scale: scalar giving the size that the object should be in the scene
- loc: tuple (x, y) giving the coordinates on the ground plane where the
object should be placed.
"""
# First figure out how many of this object are already in the scene so we can
# give the new object a unique name
count = 0
for obj in bpy.data.objects:
if obj.name.startswith(name):
count += 1
filename = os.path.join(object_dir, '%s.blend' % name, 'Object', name)
bpy.ops.wm.append(filename=filename)
# Give it a new name to avoid conflicts
new_name = '%s_%d' % (name, count)
bpy.data.objects[name].name = new_name
# Set the new object as active, then rotate, scale, and translate it
x, y = loc
bpy.context.view_layer.objects.active = bpy.data.objects[new_name]
bpy.context.object.rotation_euler[2] = theta
bpy.ops.transform.resize(value=(scale, scale, scale))
bpy.ops.transform.translate(value=(x, y, scale))
def load_materials(material_dir):
"""
Load materials from a directory. We assume that the directory contains .blend
files with one material each. The file X.blend has a single NodeTree item named
X; this NodeTree item must have a "Color" input that accepts an RGBA value.
"""
for fn in os.listdir(material_dir):
if not fn.endswith('.blend'): continue
name = os.path.splitext(fn)[0]
filepath = os.path.join(material_dir, fn, 'NodeTree', name)
bpy.ops.wm.append(filename=filepath)
def add_material(name, **properties):
"""
Create a new material and assign it to the active object. "name" should be the
name of a material that has been previously loaded using load_materials.
"""
# Figure out how many materials are already in the scene
mat_count = len(bpy.data.materials)
# Create a new material; it is not attached to anything and
# it will be called "Material"
bpy.ops.material.new()
# Get a reference to the material we just created and rename it;
# then the next time we make a new material it will still be called
# "Material" and we will still be able to look it up by name
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % mat_count
# Attach the new material to the active object
# Make sure it doesn't already have materials
obj = bpy.context.active_object
assert len(obj.data.materials) == 0
obj.data.materials.append(mat)
# Find the output node of the new material
output_node = None
for n in mat.node_tree.nodes:
if n.name == 'Material Output':
output_node = n
break
# Add a new GroupNode to the node tree of the active material,
# and copy the node tree from the preloaded node group to the
# new group node. This copying seems to happen by-value, so
# we can create multiple materials of the same type without them
# clobbering each other
group_node = mat.node_tree.nodes.new('ShaderNodeGroup')
group_node.node_tree = bpy.data.node_groups[name]
# Find and set the "Color" input of the new group node
for inp in group_node.inputs:
if inp.name in properties:
inp.default_value = properties[inp.name]
# Wire the output of the new group node to the input of
# the MaterialOutput node
mat.node_tree.links.new(
group_node.outputs['Shader'],
output_node.inputs['Surface'],
)
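
# Usage sketch (illustrative only): the directory paths, the object name and the
# material name below are placeholders for assets loaded elsewhere, not files shipped
# with this module.
def _example_scene_setup():
    load_materials('data/materials')   # one .blend node group per material
    add_object('data/shapes', 'SmoothCube_v2', scale=0.7, loc=(1.0, -1.0), theta=0.5)
    add_material('Rubber', Color=(0.2, 0.5, 0.8, 1.0))  # attaches to the active object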
|
the-stack_0_12144 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.streams import StreamsSmokeTestDriverService, StreamsSmokeTestJobRunnerService
class StreamsSmokeTest(KafkaTest):
"""
Simple test of Kafka Streams.
"""
def __init__(self, test_context):
super(StreamsSmokeTest, self).__init__(test_context, num_zk=1, num_brokers=3, topics={
'echo' : { 'partitions': 5, 'replication-factor': 1 },
'data' : { 'partitions': 5, 'replication-factor': 1 },
'min' : { 'partitions': 5, 'replication-factor': 1 },
'min-suppressed' : { 'partitions': 5, 'replication-factor': 1 },
'min-raw' : { 'partitions': 5, 'replication-factor': 1 },
'max' : { 'partitions': 5, 'replication-factor': 1 },
'sum' : { 'partitions': 5, 'replication-factor': 1 },
'sws-raw' : { 'partitions': 5, 'replication-factor': 1 },
'sws-suppressed' : { 'partitions': 5, 'replication-factor': 1 },
'dif' : { 'partitions': 5, 'replication-factor': 1 },
'cnt' : { 'partitions': 5, 'replication-factor': 1 },
'avg' : { 'partitions': 5, 'replication-factor': 1 },
'wcnt' : { 'partitions': 5, 'replication-factor': 1 },
'tagg' : { 'partitions': 5, 'replication-factor': 1 }
})
self.test_context = test_context
self.driver = StreamsSmokeTestDriverService(test_context, self.kafka)
@cluster(num_nodes=8)
@matrix(processing_guarantee=['at_least_once', 'exactly_once', 'exactly_once_beta'], crash=[True, False])
def test_streams(self, processing_guarantee, crash):
processor1 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka, processing_guarantee)
processor2 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka, processing_guarantee)
processor3 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka, processing_guarantee)
with processor1.node.account.monitor_log(processor1.STDOUT_FILE) as monitor1:
processor1.start()
monitor1.wait_until('REBALANCING -> RUNNING',
timeout_sec=60,
err_msg="Never saw 'REBALANCING -> RUNNING' message " + str(processor1.node.account)
)
self.driver.start()
monitor1.wait_until('processed',
timeout_sec=30,
err_msg="Didn't see any processing messages " + str(processor1.node.account)
)
# make sure we're not already done processing (which would invalidate the test)
self.driver.node.account.ssh("! grep 'Result Verification' %s" % self.driver.STDOUT_FILE, allow_fail=False)
processor1.stop_nodes(not crash)
with processor2.node.account.monitor_log(processor2.STDOUT_FILE) as monitor2:
processor2.start()
monitor2.wait_until('REBALANCING -> RUNNING',
timeout_sec=120,
err_msg="Never saw 'REBALANCING -> RUNNING' message " + str(processor2.node.account)
)
monitor2.wait_until('processed',
timeout_sec=30,
err_msg="Didn't see any processing messages " + str(processor2.node.account)
)
# make sure we're not already done processing (which would invalidate the test)
self.driver.node.account.ssh("! grep 'Result Verification' %s" % self.driver.STDOUT_FILE, allow_fail=False)
processor2.stop_nodes(not crash)
with processor3.node.account.monitor_log(processor3.STDOUT_FILE) as monitor3:
processor3.start()
monitor3.wait_until('REBALANCING -> RUNNING',
timeout_sec=120,
err_msg="Never saw 'REBALANCING -> RUNNING' message " + str(processor3.node.account)
)
# there should still be some data left for this processor to work on.
monitor3.wait_until('processed',
timeout_sec=30,
err_msg="Didn't see any processing messages " + str(processor3.node.account)
)
self.driver.wait()
self.driver.stop()
processor3.stop()
if crash and processing_guarantee == 'at_least_once':
self.driver.node.account.ssh("grep -E 'SUCCESS|PROCESSED-MORE-THAN-GENERATED' %s" % self.driver.STDOUT_FILE, allow_fail=False)
else:
self.driver.node.account.ssh("grep SUCCESS %s" % self.driver.STDOUT_FILE, allow_fail=False)
|
the-stack_0_12145 | # coding=utf-8
"""
safety.py - Alerts about malicious URLs
Copyright © 2014, Elad Alfassa, <[email protected]>
Licensed under the Eiffel Forum License 2.
This module uses virustotal.com
"""
import sopel.web as web
from sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute
from sopel.formatting import color, bold
from sopel.logger import get_logger
from sopel.module import OP
import sopel.tools
import sys
import json
import time
import os.path
import re
if sys.version_info.major > 2:
    from urllib.request import urlretrieve
    from urllib.parse import urlparse
else:
    # Python 2 fallback: the same names live in different modules
    from urllib import urlretrieve
    from urlparse import urlparse
LOGGER = get_logger(__name__)
vt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'
malware_domains = set()
known_good = []
class SafetySection(StaticSection):
enabled_by_default = ValidatedAttribute('enabled_by_default', bool, default=True)
"""Enable URL safety in all channels where it isn't explicitly disabled."""
known_good = ListAttribute('known_good')
"""List of "known good" domains to ignore."""
vt_api_key = ValidatedAttribute('vt_api_key')
"""Optional VirusTotal API key."""
def configure(config):
config.define_section('safety', SafetySection)
config.safety.configure_setting(
'enabled_by_default',
"Enable URL safety in channels that don't specifically disable it?",
)
config.safety.configure_setting(
'known_good',
'Enter any domains to whitelist',
)
config.safety.configure_setting(
'vt_api_key',
"Optionally, enter a VirusTotal API key to improve malicious URL "
"protection.\nOtherwise, only the Malwarebytes DB will be used."
)
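# Example configuration block as it might appear in the Sopel config file
# (values are placeholders; the list syntax follows Sopel's ListAttribute conventions,
# and known_good entries are treated as case-insensitive regular expressions):
#
#   [safety]
#   enabled_by_default = True
#   known_good = sopel\.chat,dftba\.net
#   vt_api_key = 0123456789abcdef0123456789abcdef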
def setup(bot):
bot.config.define_section('safety', SafetySection)
bot.memory['safety_cache'] = sopel.tools.SopelMemory()
for item in bot.config.safety.known_good:
known_good.append(re.compile(item, re.I))
loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')
if os.path.isfile(loc):
if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:
# File exists but older than one week, update
_download_malwaredomains_db(loc)
else:
_download_malwaredomains_db(loc)
with open(loc, 'r') as f:
for line in f:
clean_line = str(line).strip().lower()
if clean_line != '':
malware_domains.add(clean_line)
def _download_malwaredomains_db(path):
print('Downloading malwaredomains db...')
urlretrieve('http://mirror1.malwaredomains.com/files/justdomains', path)
@sopel.module.rule('(?u).*(https?://\S+).*')
@sopel.module.priority('high')
def url_handler(bot, trigger):
""" Check for malicious URLs """
check = True # Enable URL checking
strict = False # Strict mode: kick on malicious URL
positives = 0 # Number of engines saying it's malicious
total = 0 # Number of total engines
use_vt = True # Use VirusTotal
check = bot.config.safety.enabled_by_default
if check is None:
# If not set, assume default
check = True
# DB overrides config:
setting = bot.db.get_channel_value(trigger.sender, 'safety')
if setting is not None:
if setting == 'off':
return # Not checking
elif setting in ['on', 'strict', 'local', 'local strict']:
check = True
if setting == 'strict' or setting == 'local strict':
strict = True
if setting == 'local' or setting == 'local strict':
use_vt = False
if not check:
        return  # Not overridden by DB, configured default off
netloc = urlparse(trigger.group(1)).netloc
if any(regex.search(netloc) for regex in known_good):
return # Whitelisted
apikey = bot.config.safety.vt_api_key
try:
if apikey is not None and use_vt:
payload = {'resource': str(trigger),
'apikey': apikey,
'scan': '1'}
if trigger not in bot.memory['safety_cache']:
result = web.post(vt_base_api_url + 'report', payload)
if sys.version_info.major > 2:
result = result.decode('utf-8')
result = json.loads(result)
age = time.time()
data = {'positives': result['positives'],
'total': result['total'],
'age': age}
bot.memory['safety_cache'][trigger] = data
if len(bot.memory['safety_cache']) > 1024:
_clean_cache(bot)
else:
print('using cache')
result = bot.memory['safety_cache'][trigger]
positives = result['positives']
total = result['total']
except Exception:
LOGGER.debug('Error from checking URL with VT.', exc_info=True)
pass # Ignoring exceptions with VT so MalwareDomains will always work
if str(netloc).lower() in malware_domains:
# malwaredomains is more trustworthy than some VT engines
        # therefore it gets a weight of 10 engines when calculating confidence
positives += 10
total += 10
if positives > 1:
# Possibly malicious URL detected!
confidence = '{}%'.format(round((positives / total) * 100))
msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)
msg += '(confidence %s - %s/%s)' % (confidence, positives, total)
bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)
if strict:
bot.write(['KICK', trigger.sender, trigger.nick,
'Posted a malicious link'])
@sopel.module.commands('safety')
def toggle_safety(bot, trigger):
""" Set safety setting for channel """
if not trigger.admin and bot.privileges[trigger.sender][trigger.nick] < OP:
bot.reply('Only channel operators can change safety settings')
return
allowed_states = ['strict', 'on', 'off', 'local', 'local strict']
if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:
options = ' / '.join(allowed_states)
bot.reply('Available options: %s' % options)
return
channel = trigger.sender.lower()
bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())
bot.reply('Safety is now set to "%s" on this channel' % trigger.group(2))
# Clean the cache every day, also when > 1024 entries
@sopel.module.interval(24 * 60 * 60)
def _clean_cache(bot):
""" Cleanup old entries in URL cache """
# TODO probably should be using locks here, to make sure stuff doesn't
# explode
oldest_key_age = 0
oldest_key = ''
for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):
if data['age'] > oldest_key_age:
oldest_key_age = data['age']
oldest_key = key
if oldest_key in bot.memory['safety_cache']:
del bot.memory['safety_cache'][oldest_key]
|
the-stack_0_12147 | from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.optimizers as optimizers
from rl.core import Agent
from rl.random import OrnsteinUhlenbeckProcess
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
# Deep DPG as described by Lillicrap et al. (2015)
# http://arxiv.org/pdf/1509.02971v2.pdf
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.646.4324&rep=rep1&type=pdf
class DDPGAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, actor, critic, critic_action_input, memory,
gamma=.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=np.inf,
random_process=None, custom_model_objects={}, target_model_update=.001, **kwargs):
if hasattr(actor.output, '__len__') and len(actor.output) > 1:
raise ValueError('Actor "{}" has more than one output. DDPG expects an actor that has a single output.'.format(actor))
if hasattr(critic.output, '__len__') and len(critic.output) > 1:
raise ValueError('Critic "{}" has more than one output. DDPG expects a critic that has a single output.'.format(critic))
if critic_action_input not in critic.input:
raise ValueError('Critic "{}" does not have designated action input "{}".'.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError('Critic "{}" does not have enough inputs. The critic must have at exactly two inputs, one for the action and one for the observation.'.format(critic))
super(DDPGAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
# Related objects.
self.actor = actor
self.critic = critic
self.critic_action_input = critic_action_input
self.critic_action_input_idx = self.critic.input.index(critic_action_input)
self.memory = memory
# State.
self.compiled = False
self.reset_states()
@property
def uses_learning_phase(self):
return self.actor.uses_learning_phase or self.critic.uses_learning_phase
def compile(self, optimizer, metrics=[]):
metrics += [mean_q]
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
raise ValueError('More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.')
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
def clipped_error(y_true, y_pred):
return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
# Compile target networks. We only use them in feed-forward mode, hence we can pass any
# optimizer and loss since we never use it anyway.
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects)
self.target_critic.compile(optimizer='sgd', loss='mse')
# We also compile the actor. We never optimize the actor using Keras but instead compute
# the policy gradient ourselves. However, we need the actor in feed-forward mode, hence
        # we also compile it with an arbitrary optimizer and loss, since they are never used anyway.
self.actor.compile(optimizer='sgd', loss='mse')
# Compile the critic.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
critic_updates = get_soft_target_model_updates(self.target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer, critic_updates)
self.critic.compile(optimizer=critic_optimizer, loss=clipped_error, metrics=critic_metrics)
# Combine actor and critic so that we can get the policy gradient.
# Assuming critic's state inputs are the same as actor's.
combined_inputs = []
critic_inputs = []
for i in self.critic.input:
if i == self.critic_action_input:
combined_inputs.append([])
else:
combined_inputs.append(i)
critic_inputs.append(i)
combined_inputs[self.critic_action_input_idx] = self.actor(critic_inputs)
combined_output = self.critic(combined_inputs)
updates = actor_optimizer.get_updates(self.actor.trainable_weights, self.actor.constraints,
loss=-K.mean(combined_output))
if self.target_model_update < 1.:
# Include soft target model updates.
updates += get_soft_target_model_updates(self.target_actor, self.actor, self.target_model_update)
updates += self.actor.updates # include other updates of the actor, e.g. for BN
# Finally, combine it all into a callable function.
if self.uses_learning_phase:
critic_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(critic_inputs, [self.actor(critic_inputs)], updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
def save_weights(self, filepath, overwrite=False):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.save_weights(actor_filepath, overwrite=overwrite)
self.critic.save_weights(critic_filepath, overwrite=overwrite)
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
# TODO: implement pickle
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
batch = self.process_state_batch([state])
action = self.actor.predict_on_batch(batch).flatten()
assert action.shape == (self.nb_actions,)
# Apply noise, if a random process is set.
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action.shape
action += noise
return action
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state) # TODO: move this into policy
if self.processor is not None:
action = self.processor.process_action(action)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
can_train_either = self.step > self.nb_steps_warmup_critic or self.step > self.nb_steps_warmup_actor
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Update critic, if warm up is over.
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(state1_batch)
assert target_actions.shape == (self.batch_size, self.nb_actions)
if len(self.critic.inputs) >= 3:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
state1_batch_with_action.insert(self.critic_action_input_idx, target_actions)
target_q_values = self.target_critic.predict_on_batch(state1_batch_with_action).flatten()
assert target_q_values.shape == (self.batch_size,)
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target ys accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * target_q_values
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
targets = (reward_batch + discounted_reward_batch).reshape(self.batch_size, 1)
# Perform a single batch update on the critic network.
if len(self.critic.inputs) >= 3:
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
state0_batch_with_action.insert(self.critic_action_input_idx, action_batch)
metrics = self.critic.train_on_batch(state0_batch_with_action, targets)
if self.processor is not None:
metrics += self.processor.metrics
# Update actor, if warm up is over.
if self.step > self.nb_steps_warmup_actor:
# TODO: implement metrics for actor
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)[0]
assert action_values.shape == (self.batch_size, self.nb_actions)
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_models_hard()
return metrics
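
# Usage sketch (illustrative only): the layer sizes, the Keras-2 style functional API
# (Concatenate; older Keras versions used merge()), and SequentialMemory from rl.memory
# are choices made for this example, not requirements imposed by this file.
def _example_build_agent(nb_actions, observation_shape):
    from keras.layers import Dense, Flatten, Input, Concatenate
    from keras.models import Model, Sequential
    from keras.optimizers import Adam
    from rl.memory import SequentialMemory

    # Actor maps an observation window to an action vector.
    actor = Sequential([
        Flatten(input_shape=(1,) + observation_shape),
        Dense(32, activation='relu'),
        Dense(nb_actions, activation='tanh'),
    ])

    # Critic takes both the action and the observation; the action Input object itself
    # is what gets passed as critic_action_input.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + observation_shape, name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(32, activation='relu')(x)
    x = Dense(1, activation='linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=100000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                      critic_action_input=action_input, memory=memory,
                      nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                      random_process=random_process, target_model_update=1e-3)
    agent.compile(Adam(lr=1e-3), metrics=['mae'])
    return agent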
|
the-stack_0_12148 | #!/usr/bin/env python
"""
A new .py file
"""
__author__ = 'ccluff'
from typing import List
def parse_arg(arg: str) -> List[float]:
"""for parsing cli args from str to python objects"""
arg = arg.replace('[', '').replace(']', '').replace(' ', '').split(',')
return list(map(float, arg))
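# Example: parse_arg("[1, 2.5, 3]") -> [1.0, 2.5, 3.0]; parse_arg("4,5") -> [4.0, 5.0]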
|
the-stack_0_12153 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import re
import random
import shutil
__all__ = ['create_list']
def create_list(devkit_dir, years, output_dir):
"""
create following list:
1. trainval.txt
2. test.txt
"""
trainval_list = []
test_list = []
for year in years:
trainval, test = _walk_voc_dir(devkit_dir, year, output_dir)
trainval_list.extend(trainval)
test_list.extend(test)
random.shuffle(trainval_list)
with open(osp.join(output_dir, 'trainval.txt'), 'w') as ftrainval:
for item in trainval_list:
ftrainval.write(item[0] + ' ' + item[1] + '\n')
with open(osp.join(output_dir, 'test.txt'), 'w') as fval:
ct = 0
for item in test_list:
ct += 1
fval.write(item[0] + ' ' + item[1] + '\n')
def _get_voc_dir(devkit_dir, year, type):
return osp.join(devkit_dir, 'VOC' + year, type)
def _walk_voc_dir(devkit_dir, year, output_dir):
filelist_dir = _get_voc_dir(devkit_dir, year, 'ImageSets/Main')
annotation_dir = _get_voc_dir(devkit_dir, year, 'Annotations')
img_dir = _get_voc_dir(devkit_dir, year, 'JPEGImages')
trainval_list = []
test_list = []
added = set()
for _, _, files in os.walk(filelist_dir):
for fname in files:
img_ann_list = []
if re.match(r'[a-z]+_trainval\.txt', fname):
img_ann_list = trainval_list
elif re.match(r'[a-z]+_test\.txt', fname):
img_ann_list = test_list
else:
continue
fpath = osp.join(filelist_dir, fname)
for line in open(fpath):
name_prefix = line.strip().split()[0]
if name_prefix in added:
continue
added.add(name_prefix)
ann_path = osp.join(
osp.relpath(annotation_dir, output_dir),
name_prefix + '.xml')
img_path = osp.join(
osp.relpath(img_dir, output_dir), name_prefix + '.jpg')
img_ann_list.append((img_path, ann_path))
return trainval_list, test_list
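
# Usage sketch (paths are placeholders): given a standard VOCdevkit layout containing
# VOC2007/VOC2012 subfolders, this writes trainval.txt and test.txt into output_dir
# with image/annotation path pairs expressed relative to that directory.
def _example_create_list():
    create_list(devkit_dir='dataset/VOCdevkit',
                years=['2007', '2012'],
                output_dir='dataset')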
|
the-stack_0_12155 | #!/usr/bin/env python
import math
import numpy
from matplotlib import pyplot
from mwa_pb import mwapb
def plot_beam(delays=numpy.zeros(16), gains=numpy.ones(16), stokes='I'):
t = numpy.mgrid[0:91,0:361]
el = t[0, :, :]
az = t[1, :, :]
dtor = math.pi / 180.0
theta = (90 - el) * dtor
phi = az * dtor
ll = numpy.sin(theta) * numpy.sin(phi)
m = numpy.sin(theta) * numpy.cos(phi)
tbeam = mwapb.MWA_tile_gain(freq=300e6, stokes=stokes, delays=delays, gains=gains)
pyplot.contourf(ll, m, tbeam.calculate(az, el), 256)
pyplot.draw()
if __name__ == "__main__":
pyplot.ion()
plot_beam()
pyplot.show()
|
the-stack_0_12156 | # -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2009-2011 Stephen Tiedemann <[email protected]>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
#
# Negotiated Connection Handover - Server Base Class
#
import logging
log = logging.getLogger(__name__)
from threading import Thread
import nfc.llcp
class HandoverServer(Thread):
""" NFC Forum Connection Handover server
"""
def __init__(self, llc, request_size_limit=0x10000,
recv_miu=1984, recv_buf=15):
socket = nfc.llcp.Socket(llc, nfc.llcp.DATA_LINK_CONNECTION)
recv_miu = socket.setsockopt(nfc.llcp.SO_RCVMIU, recv_miu)
recv_buf = socket.setsockopt(nfc.llcp.SO_RCVBUF, recv_buf)
socket.bind('urn:nfc:sn:handover')
log.info("handover server bound to port {0} (MIU={1}, RW={2})"
.format(socket.getsockname(), recv_miu, recv_buf))
socket.listen(backlog=2)
Thread.__init__(self, name='urn:nfc:sn:handover',
target=self.listen, args=(llc, socket))
def listen(self, llc, socket):
log.debug("handover listen thread started")
try:
while True:
client_socket = socket.accept()
client_thread = Thread(target=HandoverServer.serve,
args=(client_socket, self))
client_thread.start()
except nfc.llcp.Error as e:
(log.debug if e.errno == nfc.llcp.errno.EPIPE else log.error)(e)
finally:
socket.close()
log.debug("handover listen thread terminated")
@staticmethod
def serve(socket, handover_server):
peer_sap = socket.getpeername()
log.info("serving handover client on remote sap {0}".format(peer_sap))
send_miu = socket.getsockopt(nfc.llcp.SO_SNDMIU)
try:
while True:
request_data = ''
while socket.poll("recv"):
data = socket.recv()
if data is not None:
request_data += data
try:
request = nfc.ndef.Message(request_data)
break # message complete
except nfc.ndef.LengthError:
continue # need more data
else: return # connection closed
else: return # connection closed
log.debug("<<< {0!r}".format(request_data))
response = handover_server._process_request(request)
response_data = str(response)
log.debug(">>> {0!r}".format(response_data))
while len(response_data) > 0:
if socket.send(response_data[0:send_miu]):
response_data = response_data[send_miu:]
else:
return # connection closed
except nfc.llcp.Error as e:
(log.debug if e.errno == nfc.llcp.errno.EPIPE else log.error)(e)
finally:
socket.close()
log.debug("handover serve thread terminated")
def _process_request(self, request):
log.debug("rcvd handover request {0}\n{1}"
.format(request.type, request.pretty()))
response = nfc.ndef.Message("\xd1\x02\x01Hs\x12")
if not request.type == 'urn:nfc:wkt:Hr':
log.error("received message which is not a handover request")
else:
try:
request = nfc.ndef.HandoverRequestMessage(request)
except nfc.ndef.DecodeError as e:
log.error("error decoding 'Hr' message: {0}".format(e))
else:
response = self.process_request(request)
log.debug("send handover response {0}\n{1}"
.format(response.type, response.pretty()))
return response
def process_request(self, request):
"""Process a handover request message. The *request* argument
is a :class:`nfc.ndef.HandoverRequestMessage` object. The
return value must be a :class:`nfc.ndef.HandoverSelectMessage`
object to be sent back to the client.
This method should be overwritten by a subclass of
        :class:`HandoverServer` to customize its behavior. The
default implementation returns a version ``1.2``
:class:`nfc.ndef.HandoverSelectMessage` with no carriers.
"""
log.warning("default process_request method should be overwritten")
return nfc.ndef.HandoverSelectMessage(version="1.2")
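# Illustrative sketch (not part of the original module): a minimal subclass that
# overrides process_request() as the docstring above suggests. It simply answers
# every handover request with an empty version-1.2 select message.
class EmptyHandoverServer(HandoverServer):
    def process_request(self, request):
        # No alternative carriers are offered in this bare-bones example.
        return nfc.ndef.HandoverSelectMessage(version="1.2")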
|
the-stack_0_12157 | # O(n) time | O(n) space
def minRewards(scores):
    # Two passes: left-to-right rewards increasing runs, right-to-left fixes decreasing runs.
dp = [1] * len(scores)
for i in range(1, len(scores)):
if scores[i] > scores[i - 1]:
dp[i] = dp[i - 1] + 1
for j in reversed(range(len(scores) - 1)):
if scores[j + 1] < scores[j]:
dp[j] = max(dp[j], dp[j + 1] + 1)
return sum(dp)
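# Quick check (illustrative): for the classic example below the two passes
# yield rewards [4, 3, 2, 1, 2, 3, 4, 5, 1], which sum to 25.
if __name__ == "__main__":
    print(minRewards([8, 4, 2, 1, 3, 6, 7, 9, 5]))  # expected output: 25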
|
the-stack_0_12158 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import os
from argparse import ArgumentParser
from os.path import isfile
from flask import Flask, jsonify, request
from flask_restful import reqparse
from PIL import Image
from teach.utils import dynamically_load_class, load_images
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
app.logger.info("initialize flask server")
def parse_args():
arg_parser = ArgumentParser()
arg_parser.add_argument(
"--data_dir",
type=str,
required=True,
help='Base data directory containing subfolders "games" and "edh_instances',
)
arg_parser.add_argument(
"--images_dir",
type=str,
required=True,
help="Images directory containing inference image output",
)
arg_parser.add_argument(
"--split",
type=str,
default="valid_seen",
choices=["train", "valid_seen", "valid_unseen", "test_seen", "test_unseen"],
help="One of train, valid_seen, valid_unseen, test_seen, test_unseen",
)
arg_parser.add_argument(
"--model_module",
type=str,
default="teach.inference.sample_model",
help="Path of the python module to load the model class from.",
)
arg_parser.add_argument(
"--model_class", type=str, default="SampleModel", help="Name of the TeachModel class to use during inference."
)
arg_parser.add_argument(
"--use_edh_file", dest="use_edh_file", action="store_true", help="Use edh file instead of request json."
)
arg_parser.add_argument(
"--use_img_file", dest="use_img_file", action="store_true", help="Use img file instead of request bytes."
)
return arg_parser.parse_known_args()
teach_args, model_args = parse_args()
model_class = dynamically_load_class(teach_args.model_module, teach_args.model_class)
process_index, num_processes = 1, 1
model = model_class(process_index, num_processes, model_args=model_args)
def _get_edh_instance(req_args):
if teach_args.use_edh_file:
if not req_args.edh_name:
return None, "request parameter edh_name does not have a value"
edh_instance_path = os.path.join(teach_args.data_dir, "edh_instances", teach_args.split, req_args.edh_name)
if not isfile(edh_instance_path):
return None, f"edh file={edh_instance_path} does not exist"
with open(edh_instance_path) as handle:
edh_instance = json.load(handle)
else:
edh_instance = json.loads(req_args.edh_instance)
return edh_instance, None
def _get_img(req_args):
if not req_args.img_name:
return None, "request parameter img_name does not have a value"
if teach_args.use_img_file:
img_path = os.path.join(teach_args.images_dir, req_args.img_name)
if not isfile(img_path):
return None, f"image file={img_path} does not exist"
img = Image.open(img_path)
else:
img_file = request.files.get("img")
if not img_file:
return None, f"image is not set in request with key='img'"
img = Image.open(img_file)
return img, None
def _get_edh_history_images(edh_name, edh_instance):
edh_history_images = []
history_file_names = edh_instance["driver_image_history"]
if not history_file_names:
return edh_history_images, None
try:
if not teach_args.use_img_file:
images = request.files.getlist("edh_history_images")
if images:
for img in images:
edh_history_images.append(Image.open(img))
if not edh_history_images:
image_dir = os.path.join(teach_args.data_dir, "images", teach_args.split, edh_instance["game_id"])
edh_history_images = load_images(image_dir, history_file_names)
except Exception:
err_msg = f"failed to load history images edh_name={edh_name}"
app.logger.error(err_msg, exc_info=True)
return None, err_msg
if not edh_history_images:
err_msg = f"history images are empty for edh_name={edh_name} for history_file_names={history_file_names}"
app.logger.error(err_msg)
return None, err_msg
return edh_history_images, None
@app.route("/get_next_action", methods=["POST"])
def get_next_action():
req_args = get_next_action_parse_args()
edh_instance, err_msg = _get_edh_instance(req_args)
if err_msg:
return err_msg, 500
img, err_msg = _get_img(req_args)
if err_msg:
return err_msg, 500
prev_action = json.loads(req_args.prev_action) if req_args.prev_action else None
try:
action, obj_relative_coord = model.get_next_action(img, edh_instance, prev_action)
except Exception as e:
err_msg = f"failed to get_next_action with edh_name={req_args.edh_name}"
app.logger.error(err_msg, exc_info=True)
return err_msg, 500
app.logger.debug(f"model.get_next_action returns action={action}, obj_relative_coord={obj_relative_coord}")
resp = jsonify(action=action, obj_relative_coord=obj_relative_coord)
return resp, 200
@app.route("/start_new_edh_instance", methods=["POST"])
def start_new_edh_instance():
req_args = start_new_edh_instance_parse_args()
app.logger.info(f"start_new_edh_instance with edh_name={req_args.edh_name}")
edh_instance, err_msg = _get_edh_instance(req_args)
if err_msg:
return err_msg, 500
edh_history_images, err_msg = _get_edh_history_images(req_args.edh_name, edh_instance)
if err_msg:
return err_msg, 500
try:
model.start_new_edh_instance(edh_instance, edh_history_images)
except Exception as e:
err_msg = f"failed to start_new_edh_instance with edh_name={req_args.edh_name}"
app.logger.error(err_msg, exc_info=True)
return err_msg, 500
return "success", 200
@app.route("/")
@app.route("/ping")
@app.route("/test")
def test():
resp = jsonify(action="Look Up", obj_relative_coord=[0.1, 0.2])
return resp, 200
def get_next_action_parse_args():
parser = reqparse.RequestParser()
parser.add_argument(
"img_name",
type=str,
help="Image name for PIL Image containing agent's egocentric image.",
)
parser.add_argument(
"edh_name",
type=str,
help="EDH instance file name.",
)
parser.add_argument(
"prev_action",
type=str,
help="One of None or a dict with keys 'action' and 'obj_relative_coord' containing returned values.",
)
parser.add_argument(
"edh_instance",
type=str,
help="One of None or a dict with keys 'action' and 'obj_relative_coord' containing returned values.",
)
args = parser.parse_args()
return args
def start_new_edh_instance_parse_args():
parser = reqparse.RequestParser()
parser.add_argument(
"edh_name",
type=str,
help="EDH instance file name.",
)
parser.add_argument(
"edh_instance",
type=str,
help="One of None or a dict with keys 'action' and 'obj_relative_coord' containing returned values.",
)
args = parser.parse_args()
return args
def main():
app.run(host="0.0.0.0", port=5000)
app.logger.info("started flask server")
if __name__ == "__main__":
main()
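# Illustrative client sketch (assumption, not part of the original service): it
# shows how a caller might exercise the two endpoints when the server runs with
# --use_edh_file and --use_img_file, so only file names need to be posted.
def _example_client(base_url="http://localhost:5000"):
    import requests  # local import; only needed for this illustrative helper

    # Start a new EDH instance by name (resolved under data_dir/edh_instances).
    requests.post(f"{base_url}/start_new_edh_instance", data={"edh_name": "instance.json"})
    # Request the next predicted action for a given egocentric frame.
    resp = requests.post(
        f"{base_url}/get_next_action",
        data={"edh_name": "instance.json", "img_name": "frame_0.jpeg"},
    )
    return resp.json()  # {"action": ..., "obj_relative_coord": ...}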
|
the-stack_0_12159 | #! /usr/bin/env python3
"""Advent of Code: Day 10
Author: Benjamin Jung
"""
import numpy as np
from collections import Counter
from itertools import combinations
adapters = np.genfromtxt('test.txt')
max_rated = np.max(adapters) + 3
adapters = np.array(list(adapters) + [0, max_rated])
"""Part 1"""
chained_adapters = np.sort(adapters)
differences = []
for i, j in zip(chained_adapters, chained_adapters[1:]):
differences.append(j - i)
counts = Counter(differences)
print('Part 1:', counts[1] * counts[3])
print(counts)
"""Part 2"""
def factorial(n):
if n < 2:
return 1
else:
return n * factorial(n-1)
count_string = ''.join([str(int(n)) for n in differences])
count_ones = count_string.split('3')
combinations = 1
lengths_of_onesteps = []
for n in count_ones:
lengths_of_onesteps.append(len(n))
print(lengths_of_onesteps)
for n in lengths_of_onesteps:
#if n > 3:
# combinations *= factorial(n-1)
#else:
# combinations *= factorial(n)
if n == 0:
combinations *= 1
elif n == 1:
combinations *= 1
elif n == 2:
combinations *= 2
    elif n == 3:
        # a run of three 1-jolt steps allows 2**2 = 4 arrangements
        combinations *= 4
    elif n == 4:
        combinations *= 7
print(combinations)
print('Combinations:', factorial(counts[1]) / factorial(2))
print('-'*30)
print(len(differences))
print(len(chained_adapters))
num_droppable_2s = 0
num_droppable_1s = 0
for dif, next in zip(differences, differences[1:]):
if dif == 2 and next == 1:
num_droppable_2s +=1
elif dif == 1 and next == 2:
num_droppable_1s +=1
elif dif == 1 and next == 1:
num_droppable_1s += 1
print(num_droppable_2s)
print('Pow:',2**num_droppable_2s)
print(num_droppable_1s)
print('Pow:', 2**num_droppable_1s)
print(np.log(19208)/np.log(2))
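# Reference approach (illustrative): the arrangement count can also be computed
# with a simple DP over the sorted adapters -- ways[j] sums the ways over all
# adapters within 3 jolts below adapter j. Shown here for comparison only.
ways = {0: 1}
for adapter in sorted(adapters)[1:]:
    ways[adapter] = sum(ways.get(adapter - d, 0) for d in (1, 2, 3))
print('Part 2 (DP):', ways[max(adapters)])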
|
the-stack_0_12160 | #!/usr/bin/env python
from setuptools import setup, find_packages
config = {
'name': 'stomasimulator',
'description': 'Perform biomechanical simulations of stomata',
'long_description': open('README.md').read(),
'author': 'Hugh C. Woolfenden',
'author_email': '[email protected]',
'url': 'https://github.com/woolfeh/stomasimulator',
'download_url': 'https://github.com/woolfeh/stomasimulator',
'install_requires': ['pytest'],
'packages': find_packages(),
'version': '0.1.0',
'scripts': ['bin/stomasimulator'],
'license': open('LICENSE').read(),
'include_package_data': True
}
setup(**config)
|
the-stack_0_12163 | # Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import os
import numpy as np
from gym.spaces import Box, Dict
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.recurrent_tf_modelv2 import (
RecurrentTFModelV2,
add_time_dimension,
)
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils import try_import_tf
from tensorflow import keras
# Disable TF INFO, WARNING, and ERROR messages
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf = try_import_tf()
_WORLD_MAP_NAME = "world-map"
_WORLD_IDX_MAP_NAME = "world-idx_map"
_MASK_NAME = "action_mask"
def get_flat_obs_size(obs_space):
if isinstance(obs_space, Box):
return np.prod(obs_space.shape)
elif not isinstance(obs_space, Dict):
raise TypeError
def rec_size(obs_dict_space, n=0):
for subspace in obs_dict_space.spaces.values():
if isinstance(subspace, Box):
n = n + np.prod(subspace.shape)
elif isinstance(subspace, Dict):
n = rec_size(subspace, n=n)
else:
raise TypeError
return n
return rec_size(obs_space)
def apply_logit_mask(logits, mask):
"""Mask values of 1 are valid actions."
" Add huge negative values to logits with 0 mask values."""
logit_mask = tf.ones_like(logits) * -10000000
logit_mask = logit_mask * (1 - mask)
return logits + logit_mask
class KerasConvLSTM(RecurrentTFModelV2):
"""
The model used in the paper "The AI Economist: Optimal Economic Policy
Design via Two-level Deep Reinforcement Learning"
(https://arxiv.org/abs/2108.02755)
We combine convolutional, fully connected, and recurrent layers to process
spatial, non-spatial, and historical information, respectively.
For recurrent components, each agent maintains its own hidden state.
"""
custom_name = "keras_conv_lstm"
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, num_outputs, model_config, name)
input_emb_vocab = self.model_config["custom_options"]["input_emb_vocab"]
emb_dim = self.model_config["custom_options"]["idx_emb_dim"]
num_conv = self.model_config["custom_options"]["num_conv"]
num_fc = self.model_config["custom_options"]["num_fc"]
fc_dim = self.model_config["custom_options"]["fc_dim"]
cell_size = self.model_config["custom_options"]["lstm_cell_size"]
generic_name = self.model_config["custom_options"].get("generic_name", None)
self.cell_size = cell_size
if hasattr(obs_space, "original_space"):
obs_space = obs_space.original_space
if not isinstance(obs_space, Dict):
if isinstance(obs_space, Box):
raise TypeError(
"({}) Observation space should be a gym Dict."
" Is a Box of shape {}".format(name, obs_space.shape)
)
raise TypeError(
"({}) Observation space should be a gym Dict."
" Is {} instead.".format(name, type(obs_space))
)
# Define input layers
self._input_keys = []
non_conv_input_keys = []
input_dict = {}
conv_shape_r = None
conv_shape_c = None
conv_map_channels = None
conv_idx_channels = None
found_world_map = False
found_world_idx = False
for k, v in obs_space.spaces.items():
shape = (None,) + v.shape
input_dict[k] = tf.keras.layers.Input(shape=shape, name=k)
self._input_keys.append(k)
if k == _MASK_NAME:
pass
elif k == _WORLD_MAP_NAME:
conv_shape_r, conv_shape_c, conv_map_channels = (
v.shape[1],
v.shape[2],
v.shape[0],
)
found_world_map = True
elif k == _WORLD_IDX_MAP_NAME:
conv_idx_channels = v.shape[0] * emb_dim
found_world_idx = True
else:
non_conv_input_keys.append(k)
# Cell state and hidden state for the
# policy and value function networks.
state_in_h_p = tf.keras.layers.Input(shape=(cell_size,), name="h_pol")
state_in_c_p = tf.keras.layers.Input(shape=(cell_size,), name="c_pol")
state_in_h_v = tf.keras.layers.Input(shape=(cell_size,), name="h_val")
state_in_c_v = tf.keras.layers.Input(shape=(cell_size,), name="c_val")
seq_in = tf.keras.layers.Input(shape=(), name="seq_in")
# Determine which of the inputs are treated as non-conv inputs
if generic_name is None:
non_conv_inputs = tf.keras.layers.concatenate(
[input_dict[k] for k in non_conv_input_keys]
)
elif isinstance(generic_name, (tuple, list)):
non_conv_inputs = tf.keras.layers.concatenate(
[input_dict[k] for k in generic_name]
)
elif isinstance(generic_name, str):
non_conv_inputs = input_dict[generic_name]
else:
raise TypeError
if found_world_map:
assert found_world_idx
use_conv = True
conv_shape = (
conv_shape_r,
conv_shape_c,
conv_map_channels + conv_idx_channels,
)
conv_input_map = tf.keras.layers.Permute((1, 3, 4, 2))(
input_dict[_WORLD_MAP_NAME]
)
conv_input_idx = tf.keras.layers.Permute((1, 3, 4, 2))(
input_dict[_WORLD_IDX_MAP_NAME]
)
else:
assert not found_world_idx
use_conv = False
conv_shape = None
conv_input_map = None
conv_input_idx = None
logits, values, state_h_p, state_c_p, state_h_v, state_c_v = (
None,
None,
None,
None,
None,
None,
)
# Define the policy and value function models
for tag in ["_pol", "_val"]:
if tag == "_pol":
state_in = [state_in_h_p, state_in_c_p]
elif tag == "_val":
state_in = [state_in_h_v, state_in_c_v]
else:
raise NotImplementedError
# Apply convolution to the spatial inputs
if use_conv:
map_embedding = tf.keras.layers.Embedding(
input_emb_vocab, emb_dim, name="embedding" + tag
)
conv_idx_embedding = tf.keras.layers.Reshape(
(-1, conv_shape_r, conv_shape_c, conv_idx_channels)
)(map_embedding(conv_input_idx))
conv_input = tf.keras.layers.concatenate(
[conv_input_map, conv_idx_embedding]
)
conv_model = tf.keras.models.Sequential(name="conv_model" + tag)
assert conv_shape
conv_model.add(
tf.keras.layers.Conv2D(
16,
(3, 3),
strides=2,
activation="relu",
input_shape=conv_shape,
name="conv2D_1" + tag,
)
)
for i in range(num_conv - 1):
conv_model.add(
tf.keras.layers.Conv2D(
32,
(3, 3),
strides=2,
activation="relu",
name="conv2D_{}{}".format(i + 2, tag),
)
)
conv_model.add(tf.keras.layers.Flatten())
conv_td = tf.keras.layers.TimeDistributed(conv_model)(conv_input)
# Combine the conv output with the non-conv inputs
dense = tf.keras.layers.concatenate([conv_td, non_conv_inputs])
# No spatial inputs provided -- skip any conv steps
else:
dense = non_conv_inputs
# Preprocess observation with hidden layers and send to LSTM cell
for i in range(num_fc):
layer = tf.keras.layers.Dense(
fc_dim, activation=tf.nn.relu, name="dense{}".format(i + 1) + tag
)
dense = layer(dense)
dense = tf.keras.layers.LayerNormalization(name="layer_norm" + tag)(dense)
lstm_out, state_h, state_c = tf.keras.layers.LSTM(
cell_size, return_sequences=True, return_state=True, name="lstm" + tag
)(inputs=dense, mask=tf.sequence_mask(seq_in), initial_state=state_in)
# Project LSTM output to logits or value
output = tf.keras.layers.Dense(
self.num_outputs if tag == "_pol" else 1,
activation=tf.keras.activations.linear,
name="logits" if tag == "_pol" else "value",
)(lstm_out)
if tag == "_pol":
state_h_p, state_c_p = state_h, state_c
logits = apply_logit_mask(output, input_dict[_MASK_NAME])
elif tag == "_val":
state_h_v, state_c_v = state_h, state_c
values = output
else:
raise NotImplementedError
self.input_dict = input_dict
# This will be set in the forward_rnn() call below
self._value_out = None
for out in [logits, values, state_h_p, state_c_p, state_h_v, state_c_v]:
assert out is not None
# Create the RNN model
self.rnn_model = tf.keras.Model(
inputs=self._extract_input_list(input_dict)
+ [seq_in, state_in_h_p, state_in_c_p, state_in_h_v, state_in_c_v],
outputs=[logits, values, state_h_p, state_c_p, state_h_v, state_c_v],
)
self.register_variables(self.rnn_model.variables)
# self.rnn_model.summary()
def _extract_input_list(self, dictionary):
return [dictionary[k] for k in self._input_keys]
def forward(self, input_dict, state, seq_lens):
"""Adds time dimension to batch before sending inputs to forward_rnn().
You should implement forward_rnn() in your subclass."""
output, new_state = self.forward_rnn(
[
add_time_dimension(t, seq_lens)
for t in self._extract_input_list(input_dict["obs"])
],
state,
seq_lens,
)
return tf.reshape(output, [-1, self.num_outputs]), new_state
def forward_rnn(self, inputs, state, seq_lens):
model_out, self._value_out, h_p, c_p, h_v, c_v = self.rnn_model(
inputs + [seq_lens] + state
)
return model_out, [h_p, c_p, h_v, c_v]
def get_initial_state(self):
return [
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
]
def value_function(self):
return tf.reshape(self._value_out, [-1])
ModelCatalog.register_custom_model(KerasConvLSTM.custom_name, KerasConvLSTM)
class KerasLinear(TFModelV2):
"""A linear (feed-forward) model."""
custom_name = "keras_linear"
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, num_outputs, model_config, name)
self.MASK_NAME = "action_mask"
mask = obs_space.original_space.spaces[self.MASK_NAME]
mask_input = tf.keras.layers.Input(shape=mask.shape, name=self.MASK_NAME)
custom_options = model_config["custom_options"]
if custom_options.get('fully_connected_value', False):
self.fc_dim = int(custom_options["fc_dim"])
self.num_fc = int(custom_options["num_fc"])
else:
self.fc_dim = 0
self.num_fc = 0
self.inputs = [
tf.keras.layers.Input(
shape=(get_flat_obs_size(obs_space),), name="observations"
),
mask_input,
]
logits = tf.keras.layers.Dense(
self.num_outputs, activation=tf.keras.activations.linear, name="logits"
)(self.inputs[0])
logits = apply_logit_mask(logits, mask_input)
if custom_options.get('fully_connected_value', False):
# Value function is fully connected
fc_layers_val = keras.Sequential(name='fc_layers_val')
for i in range(self.num_fc):
fc_layers_val.add(
keras.layers.Dense(self.fc_dim,
activation=tf.nn.relu,
name="fc_layers_val-{}".format(i))
)
h_val = fc_layers_val(self.inputs[0])
values = tf.keras.layers.Dense(
1, activation=tf.keras.activations.linear, name="values"
)(h_val)
else:
# Value function is linear
values = tf.keras.layers.Dense(
1, activation=tf.keras.activations.linear, name="values"
)(self.inputs[0])
self.base_model = tf.keras.Model(self.inputs, [logits, values])
self.register_variables(self.base_model.variables)
def forward(self, input_dict, state, seq_lens):
model_out, self._value_out = self.base_model(
[input_dict["obs_flat"], input_dict["obs"][self.MASK_NAME]]
)
return model_out, state
def value_function(self):
return tf.reshape(self._value_out, [-1])
ModelCatalog.register_custom_model(KerasLinear.custom_name, KerasLinear)
class RandomAction(TFModelV2):
"""
A "random" model to sample actions from an action space at random.
This is used when not training an agent.
"""
custom_name = "random"
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, num_outputs, model_config, name)
if hasattr(obs_space, "original_space"):
original_space = obs_space.original_space
else:
assert isinstance(obs_space, Dict)
original_space = obs_space
mask = original_space.spaces[_MASK_NAME]
mask_input = keras.layers.Input(shape=mask.shape, name=_MASK_NAME)
self.inputs = [
keras.layers.Input(shape=(1,), name="observations"),
mask_input,
]
logits_and_value = keras.layers.Dense(
num_outputs + 1, activation=None, name="dummy_layer"
)(self.inputs[0])
unmasked_logits = logits_and_value[:, :num_outputs] * 0.0
values = logits_and_value[:, -1]
masked_logits = apply_logit_mask(unmasked_logits, mask_input)
self.base_model = keras.Model(self.inputs, [masked_logits, values])
self.register_variables(self.base_model.variables)
# This will be set in the forward() call below
self.values = None
def forward(self, input_dict, state, seq_lens):
model_out, self.values = self.base_model(
[input_dict["obs_flat"][:, :1], input_dict["obs"][_MASK_NAME]]
)
return model_out, state
def value_function(self):
return tf.reshape(self.values, [-1])
ModelCatalog.register_custom_model(RandomAction.custom_name, RandomAction)
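# Illustrative sketch (assumption, not part of the original module): the "model"
# section an RLlib trainer config might use to select KerasConvLSTM. The
# custom_options keys mirror exactly what KerasConvLSTM.__init__ reads above;
# the numeric values are placeholders.
EXAMPLE_CONV_LSTM_MODEL_CONFIG = {
    "custom_model": KerasConvLSTM.custom_name,  # "keras_conv_lstm"
    "custom_options": {
        "input_emb_vocab": 100,  # vocabulary size of the world-idx_map entries
        "idx_emb_dim": 4,        # embedding dimension per idx-map channel
        "num_conv": 2,           # number of Conv2D layers on the spatial input
        "num_fc": 2,             # number of dense layers before the LSTM
        "fc_dim": 128,           # width of those dense layers
        "lstm_cell_size": 128,   # LSTM hidden/cell state size
    },
    "max_seq_len": 25,           # standard RLlib key for RNN sequence length
}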
|
the-stack_0_12164 | import unittest
from sample import textFile
import os, fnmatch
class TestTextFile(unittest.TestCase):
def test_list_not_null(self):
"""
        Test that the textFile module returns a non-empty list
"""
auxTest = textFile.TextFile('ej.txt')
list = auxTest.listUrls()
self.assertTrue(len(list)>1)
def test_filename_wrong(self):
filename = "banana"
auxTest = textFile.TextFile(filename)
with self.assertRaises(FileNotFoundError):
list = auxTest.listUrls()
def test_filename_null(self):
with self.assertRaises(TypeError):
auxTest = textFile.TextFile()
if __name__ == '__main__':
unittest.main() |
the-stack_0_12166 | """@desc
Parser for ask search results
"""
from search_engine_parser.core.base import BaseSearch, ReturnType, SearchItem
class Search(BaseSearch):
"""
Searches Ask for string
"""
name = "Ask"
search_url = "https://www.ask.com/web?"
summary = "\t Formerly known as Ask Jeeves, Ask.com receives approximately 0.42% of the search"\
" share. ASK is based on a question/answer format where most questions are answered by "\
"other users or are in the form of polls.\nIt also has the general search functionality "\
"but the results returned lack quality compared to Google or even Bing and Yahoo."
def get_params(self, query=None, page=None, offset=None, **kwargs):
params = {}
params["o"] = 0
params["l"] = "dir"
params["qo"] = "pagination"
params["q"] = query
params["qsrc"] = 998
params["page"] = page
return params
def parse_soup(self, soup):
"""
Parses Ask Search Soup for results
"""
# find all class_='PartialSearchResults-item' => each result
return soup.find_all('div', class_="PartialSearchResults-item")
def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):
"""
Parses the source code to return
:param single_result: single result found in <div class="PartialSearchResults-item">
:type single_result: `bs4.element.ResultSet`
:return: parsed title, link and description of single result
:rtype: str, str, str
"""
rdict = SearchItem()
        if return_type in (ReturnType.FULL, ReturnType.TITLE):
            rdict["titles"] = single_result.find('a').text
        if return_type in (ReturnType.FULL, ReturnType.LINK):
            rdict["links"] = single_result.a["href"]
        if return_type in (ReturnType.FULL, ReturnType.DESCRIPTION):
rdict["descriptions"] = single_result.find(
'p', class_="PartialSearchResults-item-abstract").text
return rdict
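if __name__ == "__main__":
    # Illustrative usage sketch: the call below assumes BaseSearch exposes a
    # blocking search(query, page) helper, as the other engines in this
    # library do; the exact signature is not shown in this file.
    engine = Search()
    results = engine.search("python web scraping", 1)
    print(results)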
|
the-stack_0_12167 | import os
import setuptools
from pkg_resources import DistributionNotFound, get_distribution
#with open("README.md", "r") as fh:
# long_description = fh.read()
def get_dist(package_name):
try:
return get_distribution(package_name)
except DistributionNotFound:
return None
install_requires=['numpy', 'Pillow', 'fontTools']
if get_dist('opencv_python') is None and get_dist('opencv_contrib_python') is None:
install_requires.append('opencv_contrib_python')
# font packages
install_requires.extend(['cvttf-NotoSansCJK-Black', 'cvttf-NotoSansCJK-Bold', 'cvttf-NotoSansCJK-DemiLight',
'cvttf-NotoSansCJK-Light', 'cvttf-NotoSansCJK-Medium', 'cvttf-NotoSansCJK-Regular',
'cvttf-NotoSansCJK-Thin'])
# include files under cvttf/fonts into the package
font_dir = 'cvttf/fonts'
font_files = {}
for dirName, subDirs, files in os.walk(font_dir):
for file in files:
if os.path.isfile(dirName+'/'+file):
dirName = dirName.replace('\\', '/')
if dirName in font_files:
font_files[dirName].append(dirName+'/'+file)
else:
font_files[dirName] = [dirName+'/'+file]
setuptools.setup(
name="cvttf",
version="0.0.2",
author="Issac Lin",
author_email="[email protected]",
description="Draw text on OpenCV images using TTF/OTF fonts",
# long_description=long_description,
# long_description_content_type="text/markdown",
url="https://github.com/issaclin32/CVTTF",
packages=['cvttf'],
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
include_package_data=False,
data_files=font_files.items()
)
|
the-stack_0_12168 | import numpy as np
def numeric_grad_array(f, x, h):
"""
    Compute the numerical gradient using the two-point (central difference) formula: (f(x+h) - f(x-h)) / (2h)
source: https://en.wikipedia.org/wiki/Numerical_differentiation
Arguments:
f: function that receives x and computes value and gradient
x: np array, initial point where gradient is checked
h: small change in x to compute numerical gradient
Return:
    numpy.ndarray of numerical gradient
"""
dx = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
x_plus_h, x_minus_h = x.copy(), x.copy()
x_plus_h[ix] += h
x_minus_h[ix] -= h
dx[ix] = (f(x_plus_h)[0] - f(x_minus_h)[0]) / (2 * h)
it.iternext()
return dx
def check_gradient(f, x, delta=1e-5, tol=1e-4):
'''
Checks the implementation of analytical gradient by comparing
it to numerical gradient using two-point formula
Arguments:
f: function that receives x and computes value and gradient
x: np array, initial point where gradient is checked
delta: step to compute numerical gradient
tol: tolerance for comparing numerical and analytical gradient
Return:
bool indicating whether gradients match or not
'''
assert isinstance(x, np.ndarray)
    assert x.dtype == np.float64
orig_x = x.copy()
fx, analytic_grad = f(x)
assert np.all(np.isclose(orig_x, x, tol)), "Functions shouldn't modify input variables"
assert analytic_grad.shape == x.shape
analytic_grad = analytic_grad.copy()
numeric_grad = numeric_grad_array(f, x, h=delta)
# We will go through every dimension of x and compute numeric
# derivative for it
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
analytic_grad_at_ix = analytic_grad[ix]
numeric_grad_at_ix = numeric_grad[ix]
if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):
print("Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f" % (ix, analytic_grad_at_ix,
numeric_grad_at_ix))
return False
it.iternext()
print("Gradient check passed!")
return True
def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):
"""
Checks gradient correctness for the input and output of a layer
Arguments:
layer: neural network layer, with forward and backward functions
x: starting point for layer input
delta: step to compute numerical gradient
tol: tolerance for comparing numerical and analytical gradient
Returns:
bool indicating whether gradients match or not
"""
output = layer.forward(x)
output_weight = np.random.randn(*output.shape)
def helper_func(x):
output = layer.forward(x)
loss = np.sum(output * output_weight)
d_out = np.ones_like(output) * output_weight
grad = layer.backward(d_out)
return loss, grad
return check_gradient(helper_func, x, delta, tol)
def check_layer_param_gradient(layer, x,
param_name,
delta=1e-5, tol=1e-4):
"""
Checks gradient correctness for the parameter of the layer
Arguments:
layer: neural network layer, with forward and backward functions
x: starting point for layer input
param_name: name of the parameter
delta: step to compute numerical gradient
tol: tolerance for comparing numerical and analytical gradient
Returns:
bool indicating whether gradients match or not
"""
param = layer.params()[param_name]
initial_w = param.value
output = layer.forward(x)
output_weight = np.random.randn(*output.shape)
def helper_func(w):
param.value = w
output = layer.forward(x)
loss = np.sum(output * output_weight)
d_out = np.ones_like(output) * output_weight
layer.backward(d_out)
grad = param.grad
return loss, grad
return check_gradient(helper_func, initial_w, delta, tol)
def check_model_gradient(model, X, y,
delta=1e-5, tol=1e-4):
"""
Checks gradient correctness for all model parameters
Arguments:
model: neural network model with compute_loss_and_gradients
X: batch of input data
y: batch of labels
delta: step to compute numerical gradient
tol: tolerance for comparing numerical and analytical gradient
Returns:
bool indicating whether gradients match or not
"""
params = model.params()
for param_key in params:
print("Checking gradient for %s" % param_key)
param = params[param_key]
initial_w = param.value
def helper_func(w):
param.value = w
loss = model.compute_loss_and_gradients(X, y)
grad = param.grad
return loss, grad
if not check_gradient(helper_func, initial_w, delta, tol):
return False
return True
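if __name__ == "__main__":
    # Minimal self-check (illustrative): for f(x) = sum(x**2) the analytic
    # gradient is 2*x, so check_gradient should print "Gradient check passed!".
    check_gradient(lambda x: (np.sum(x ** 2), 2 * x), np.array([1.0, -2.0, 3.0]))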
|
the-stack_0_12169 | import os
import numpy as np
import tensorflow as tf
from resnet101 import ResNet101
filePath = "/home/luca/PycharmProjects/deeplab_113/deeplab_resnet.ckpt"
def get_filename(key):
"""Rename tensor name to the corresponding Keras layer weight name.
# Arguments
key: tensor name in TF (determined by tf.variable_scope)
"""
filename = str(key)
filename = filename.replace('/', '_')
filename = filename.replace('MobilenetV2_', '')
filename = filename.replace('BatchNorm', 'BN')
if 'Momentum' in filename:
return None
# from TF to Keras naming
filename = filename.replace('_weights', '_kernel')
filename = filename.replace('_biases', '_bias')
return filename + '.npy'
def extract_tensors_from_checkpoint_file(filename, output_folder='weights'):
"""Extract tensors from a TF checkpoint file.
# Arguments
filename: TF checkpoint file
output_folder: where to save the output numpy array files
"""
if not os.path.exists(output_folder):
os.makedirs(output_folder)
reader = tf.train.NewCheckpointReader(filename)
keys = reader.get_variable_to_shape_map()
f1 = open('./weights/testfile', 'w+')
for key in keys:
# convert tensor name into the corresponding Keras layer weight name and save
filename = get_filename(key)
if filename:
path = os.path.join(output_folder, filename)
arr = reader.get_tensor(key)
np.save(path, arr)
print(filename, file=f1)
if not os.path.exists("./weights/resnet_deeplab"):
os.makedirs("./weights/resnet_deeplab")
extract_tensors_from_checkpoint_file(
filePath, output_folder='./weights/resnet_deeplab')
print('Instantiating an empty model...')
model = ResNet101()
WEIGHTS_DIR = '/home/luca/PycharmProjects/deeplab_113/Segmentation_model/weights/resnet_deeplab/'
print('Loading weights from', WEIGHTS_DIR)
layer_model = model.layers
for layer in layer_model:
if layer.weights:
weights = []
for w in layer.weights:
weight_name = os.path.basename(w.name).replace(':0', '')
weight_file = layer.name + '_' + weight_name + '.npy'
weight_arr = np.load(os.path.join(WEIGHTS_DIR, weight_file))
weights.append(weight_arr)
layer.set_weights(weights)
print('Saving model weights...')
OUTPUT_WEIGHT_FILENAME = 'deeplabV2_resnet101_tf_dim_ordering_tf_kernels.h5'
if not os.path.exists("./weights/resnet_deeplab_model"):
os.makedirs("./weights/resnet_deeplab_model")
model.save_weights(os.path.join("./weights/resnet_deeplab_model", OUTPUT_WEIGHT_FILENAME))
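# Sanity check (illustrative): reload the file that was just written to confirm
# the converted weights round-trip through Keras without shape mismatches.
model.load_weights(os.path.join("./weights/resnet_deeplab_model", OUTPUT_WEIGHT_FILENAME))
print('Reloaded converted weights successfully.')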
# lys = deeplab_model.layers
# f1 = open('./prova', 'w+')
# for layer in lys:
# if layer.weights:
# weights = []
# for w in layer.weights:
# print(w, file=f1) |
the-stack_0_12172 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.db import transaction
from django.db.models import F, Q
from django.forms import modelformset_factory
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django.views.decorators.http import require_POST
from django_prices.templatetags import prices_i18n
from ...core.exceptions import InsufficientStock
from ...core.utils import get_paginator_items
from ...core.utils.taxes import get_taxes_for_address
from ...order import OrderStatus, events
from ...order.emails import (
send_fulfillment_confirmation_to_customer,
send_fulfillment_update,
send_order_confirmation,
)
from ...order.models import Fulfillment, FulfillmentLine, Order
from ...order.utils import update_order_prices, update_order_status
from ...shipping.models import ShippingMethod
from ..views import staff_member_required
from .filters import OrderFilter
from .forms import (
AddressForm,
AddVariantToOrderForm,
BaseFulfillmentLineFormSet,
CancelFulfillmentForm,
CancelOrderForm,
CancelOrderLineForm,
CapturePaymentForm,
ChangeQuantityForm,
CreateOrderFromDraftForm,
FulfillmentForm,
FulfillmentLineForm,
FulfillmentTrackingNumberForm,
OrderCustomerForm,
OrderEditDiscountForm,
OrderEditVoucherForm,
OrderMarkAsPaidForm,
OrderNoteForm,
OrderRemoveCustomerForm,
OrderRemoveShippingForm,
OrderRemoveVoucherForm,
OrderShippingForm,
RefundPaymentForm,
VoidPaymentForm,
)
from .utils import (
create_invoice_pdf,
create_packing_slip_pdf,
get_statics_absolute_url,
save_address_in_order,
)
@staff_member_required
@permission_required("order.manage_orders")
def order_list(request):
orders = Order.objects.prefetch_related("payments", "lines", "user")
order_filter = OrderFilter(request.GET, queryset=orders)
orders = get_paginator_items(
order_filter.qs, settings.DASHBOARD_PAGINATE_BY, request.GET.get("page")
)
ctx = {
"orders": orders,
"filter_set": order_filter,
"is_empty": not order_filter.queryset.exists(),
}
return TemplateResponse(request, "dashboard/order/list.html", ctx)
@require_POST
@staff_member_required
@permission_required("order.manage_orders")
def order_create(request):
display_gross_prices = request.site.settings.display_gross_prices
msg = pgettext_lazy("Dashboard message related to an order", "Draft order created")
order = Order.objects.create(
status=OrderStatus.DRAFT, display_gross_prices=display_gross_prices
)
# Create the draft creation event
events.draft_order_created_event(order=order, user=request.user)
# Send success message and redirect to the draft details
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
@staff_member_required
@permission_required("order.manage_orders")
def create_order_from_draft(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
status = 200
form = CreateOrderFromDraftForm(request.POST or None, instance=order)
if form.is_valid():
form.save()
msg = pgettext_lazy(
"Dashboard message related to an order", "Order created from draft order"
)
events.order_created_event(order=order, user=request.user, from_draft=True)
messages.success(request, msg)
if form.cleaned_data.get("notify_customer"):
send_order_confirmation.delay(order.pk, request.user.pk)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
template = "dashboard/order/modal/create_order.html"
ctx = {"form": form, "order": order}
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
@permission_required("order.manage_orders")
def remove_draft_order(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
if request.method == "POST":
order.delete()
msg = pgettext_lazy("Dashboard message", "Draft order successfully removed")
messages.success(request, msg)
return redirect("dashboard:orders")
template = "dashboard/order/modal/remove_order.html"
ctx = {"order": order}
return TemplateResponse(request, template, ctx)
@staff_member_required
@permission_required("order.manage_orders")
def order_details(request, order_pk):
qs = Order.objects.select_related(
"user", "shipping_address", "billing_address"
).prefetch_related(
"payments__transactions",
"events__user",
"lines__variant__product",
"fulfillments__lines__order_line",
)
order = get_object_or_404(qs, pk=order_pk)
all_payments = order.payments.order_by("-pk").all()
payment = order.get_last_payment()
ctx = {
"order": order,
"all_payments": all_payments,
"payment": payment,
"notes": order.events.filter(type=events.OrderEvents.NOTE_ADDED),
"events": order.events.order_by("-date").all(),
"order_fulfillments": order.fulfillments.all(),
}
return TemplateResponse(request, "dashboard/order/detail.html", ctx)
@staff_member_required
@permission_required("order.manage_orders")
def order_add_note(request, order_pk):
order = get_object_or_404(Order, pk=order_pk)
form = OrderNoteForm(request.POST or None)
status = 200
if form.is_valid():
events.order_note_added_event(
order=order, user=request.user, message=form.cleaned_data["message"]
)
msg = pgettext_lazy("Dashboard message related to an order", "Added note")
messages.success(request, msg)
elif form.errors:
status = 400
ctx = {"order": order, "form": form}
ctx.update(csrf(request))
template = "dashboard/order/modal/add_note.html"
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
@permission_required("order.manage_orders")
def capture_payment(request, order_pk, payment_pk):
orders = Order.objects.confirmed().prefetch_related("payments")
order = get_object_or_404(orders.prefetch_related("lines", "user"), pk=order_pk)
payment = get_object_or_404(order.payments, pk=payment_pk)
amount = order.total.gross
form = CapturePaymentForm(
request.POST or None, payment=payment, initial={"amount": amount.amount}
)
if form.is_valid() and form.capture(request.user):
msg = pgettext_lazy(
"Dashboard message related to a payment", "Captured %(amount)s"
) % {"amount": prices_i18n.amount(amount)}
events.payment_captured_event(
order=order, user=request.user, amount=amount.amount, payment=payment
)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
status = 400 if form.errors else 200
ctx = {"captured": amount, "form": form, "order": order, "payment": payment}
return TemplateResponse(
request, "dashboard/order/modal/capture.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def refund_payment(request, order_pk, payment_pk):
orders = Order.objects.confirmed().prefetch_related("payments")
order = get_object_or_404(orders, pk=order_pk)
payment = get_object_or_404(order.payments, pk=payment_pk)
amount = payment.captured_amount
form = RefundPaymentForm(
request.POST or None, payment=payment, initial={"amount": amount}
)
if form.is_valid() and form.refund(request.user):
amount = form.cleaned_data["amount"]
msg = pgettext_lazy(
"Dashboard message related to a payment", "Refunded %(amount)s"
) % {"amount": prices_i18n.amount(payment.get_captured_amount())}
events.payment_refunded_event(
order=order, user=request.user, amount=amount, payment=payment
)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
status = 400 if form.errors else 200
ctx = {
"captured": payment.get_captured_amount(),
"form": form,
"order": order,
"payment": payment,
}
return TemplateResponse(
request, "dashboard/order/modal/refund.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def void_payment(request, order_pk, payment_pk):
orders = Order.objects.confirmed().prefetch_related("payments")
order = get_object_or_404(orders, pk=order_pk)
payment = get_object_or_404(order.payments, pk=payment_pk)
form = VoidPaymentForm(request.POST or None, payment=payment)
if form.is_valid() and form.void(request.user):
msg = pgettext_lazy("Dashboard message", "Voided payment")
events.payment_voided_event(order=order, user=request.user, payment=payment)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
status = 400 if form.errors else 200
ctx = {"form": form, "order": order, "payment": payment}
return TemplateResponse(
request, "dashboard/order/modal/void.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def orderline_change_quantity(request, order_pk, line_pk):
orders = Order.objects.drafts().prefetch_related("lines")
order = get_object_or_404(orders, pk=order_pk)
line = get_object_or_404(order.lines, pk=line_pk)
form = ChangeQuantityForm(request.POST or None, instance=line)
status = 200
old_quantity = line.quantity
if form.is_valid():
msg = pgettext_lazy(
"Dashboard message related to an order line",
"Changed quantity for variant %(variant)s from"
" %(old_quantity)s to %(new_quantity)s",
) % {
"variant": line.variant,
"old_quantity": old_quantity,
"new_quantity": line.quantity,
}
with transaction.atomic():
form.save(request.user)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
ctx = {"order": order, "object": line, "form": form}
template = "dashboard/order/modal/change_quantity.html"
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
@permission_required("order.manage_orders")
def orderline_cancel(request, order_pk, line_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
line = get_object_or_404(order.lines, pk=line_pk)
form = CancelOrderLineForm(data=request.POST or None, line=line)
status = 200
if form.is_valid():
msg = (
pgettext_lazy(
"Dashboard message related to an order line", "Canceled item %s"
)
% line
)
with transaction.atomic():
form.cancel_line(request.user)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
ctx = {"order": order, "item": line, "form": form}
return TemplateResponse(
request, "dashboard/order/modal/cancel_line.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def add_variant_to_order(request, order_pk):
"""Add variant in given quantity to an order."""
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
taxes = get_taxes_for_address(order.shipping_address)
form = AddVariantToOrderForm(
request.POST or None, order=order, discounts=request.discounts, taxes=taxes
)
status = 200
if form.is_valid():
msg_dict = {
"quantity": form.cleaned_data.get("quantity"),
"variant": form.cleaned_data.get("variant"),
}
try:
with transaction.atomic():
form.save(request.user)
msg = (
pgettext_lazy(
"Dashboard message related to an order",
"Added %(quantity)d x %(variant)s",
)
% msg_dict
)
messages.success(request, msg)
except InsufficientStock:
msg = (
pgettext_lazy(
"Dashboard message related to an order",
"Insufficient stock: could not add %(quantity)d x %(variant)s",
)
% msg_dict
)
messages.warning(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
elif form.errors:
status = 400
ctx = {"order": order, "form": form}
template = "dashboard/order/modal/add_variant_to_order.html"
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
@permission_required("order.manage_orders")
def order_address(request, order_pk, address_type):
order = get_object_or_404(Order, pk=order_pk)
update_prices = False
if address_type == "shipping":
address = order.shipping_address
success_msg = pgettext_lazy("Dashboard message", "Updated shipping address")
update_prices = True
else:
address = order.billing_address
success_msg = pgettext_lazy("Dashboard message", "Updated billing address")
form = AddressForm(request.POST or None, instance=address)
if form.is_valid():
updated_address = form.save()
if not address:
save_address_in_order(order, updated_address, address_type)
if update_prices:
update_order_prices(order, request.discounts)
if not order.is_draft():
events.order_updated_address_event(
order=order, user=request.user, address=address
)
messages.success(request, success_msg)
return redirect("dashboard:order-details", order_pk=order_pk)
ctx = {"order": order, "address_type": address_type, "form": form}
return TemplateResponse(request, "dashboard/order/address_form.html", ctx)
@staff_member_required
@permission_required("order.manage_orders")
def order_customer_edit(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
form = OrderCustomerForm(request.POST or None, instance=order)
status = 200
if form.is_valid():
form.save()
update_order_prices(order, request.discounts)
user_email = form.cleaned_data.get("user_email")
user = form.cleaned_data.get("user")
if user_email:
msg = (
pgettext_lazy("Dashboard message", "%s email assigned to an order")
% user_email
)
elif user:
msg = (
pgettext_lazy("Dashboard message", "%s user assigned to an order")
% user
)
else:
msg = pgettext_lazy("Dashboard message", "Guest user assigned to an order")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
elif form.errors:
status = 400
ctx = {"order": order, "form": form}
return TemplateResponse(
request, "dashboard/order/modal/edit_customer.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def order_customer_remove(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
form = OrderRemoveCustomerForm(request.POST or None, instance=order)
if form.is_valid():
form.save()
update_order_prices(order, request.discounts)
msg = pgettext_lazy("Dashboard message", "Customer removed from an order")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
return redirect("dashboard:order-customer-edit", order_pk=order.pk)
@staff_member_required
@permission_required("order.manage_orders")
def order_shipping_edit(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
taxes = get_taxes_for_address(order.shipping_address)
form = OrderShippingForm(request.POST or None, instance=order, taxes=taxes)
status = 200
if form.is_valid():
form.save()
msg = pgettext_lazy("Dashboard message", "Shipping updated")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
elif form.errors:
status = 400
ctx = {"order": order, "form": form}
return TemplateResponse(
request, "dashboard/order/modal/edit_shipping.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def order_shipping_remove(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
form = OrderRemoveShippingForm(request.POST or None, instance=order)
if form.is_valid():
form.save()
msg = pgettext_lazy("Dashboard message", "Shipping removed")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
return redirect("dashboard:order-shipping-edit", order_pk=order.pk)
@staff_member_required
@permission_required("order.manage_orders")
def order_discount_edit(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
form = OrderEditDiscountForm(request.POST or None, instance=order)
status = 200
if form.is_valid():
form.save()
msg = pgettext_lazy("Dashboard message", "Discount updated")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
elif form.errors:
status = 400
ctx = {"order": order, "form": form}
return TemplateResponse(
request, "dashboard/order/modal/edit_discount.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def order_voucher_edit(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
form = OrderEditVoucherForm(request.POST or None, instance=order)
status = 200
if form.is_valid():
form.save()
msg = pgettext_lazy("Dashboard message", "Voucher updated")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order_pk)
elif form.errors:
status = 400
ctx = {"order": order, "form": form}
return TemplateResponse(
request, "dashboard/order/modal/edit_voucher.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def cancel_order(request, order_pk):
orders = Order.objects.confirmed().prefetch_related("lines")
order = get_object_or_404(orders, pk=order_pk)
status = 200
form = CancelOrderForm(request.POST or None, order=order)
if form.is_valid():
msg = pgettext_lazy("Dashboard message", "Order canceled")
with transaction.atomic():
form.cancel_order(request.user)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
# TODO: send status confirmation email
elif form.errors:
status = 400
ctx = {"form": form, "order": order}
return TemplateResponse(
request, "dashboard/order/modal/cancel_order.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def order_voucher_remove(request, order_pk):
order = get_object_or_404(Order.objects.drafts(), pk=order_pk)
form = OrderRemoveVoucherForm(request.POST or None, instance=order)
if form.is_valid():
msg = pgettext_lazy("Dashboard message", "Removed voucher from order")
with transaction.atomic():
form.remove_voucher()
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
return redirect("dashboard:order-voucher-edit", order_pk=order.pk)
@staff_member_required
@permission_required("order.manage_orders")
def order_invoice(request, order_pk):
orders = Order.objects.confirmed().prefetch_related(
"user", "shipping_address", "billing_address", "voucher"
)
order = get_object_or_404(orders, pk=order_pk)
absolute_url = get_statics_absolute_url(request)
pdf_file, order = create_invoice_pdf(order, absolute_url)
response = HttpResponse(pdf_file, content_type="application/pdf")
name = "invoice-%s.pdf" % order.id
response["Content-Disposition"] = "filename=%s" % name
return response
@staff_member_required
@permission_required("order.manage_orders")
def mark_order_as_paid(request, order_pk):
order = get_object_or_404(Order.objects.confirmed(), pk=order_pk)
status = 200
form = OrderMarkAsPaidForm(request.POST or None, order=order, user=request.user)
if form.is_valid():
with transaction.atomic():
form.save()
msg = pgettext_lazy("Dashboard message", "Order manually marked as paid")
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
ctx = {"form": form, "order": order}
return TemplateResponse(
request, "dashboard/order/modal/mark_as_paid.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def fulfillment_packing_slips(request, order_pk, fulfillment_pk):
orders = Order.objects.confirmed().prefetch_related(
"user", "shipping_address", "billing_address"
)
order = get_object_or_404(orders, pk=order_pk)
fulfillments = order.fulfillments.prefetch_related("lines", "lines__order_line")
fulfillment = get_object_or_404(fulfillments, pk=fulfillment_pk)
absolute_url = get_statics_absolute_url(request)
pdf_file, order = create_packing_slip_pdf(order, fulfillment, absolute_url)
response = HttpResponse(pdf_file, content_type="application/pdf")
name = "packing-slip-%s.pdf" % (order.id,)
response["Content-Disposition"] = "filename=%s" % name
return response
@staff_member_required
@permission_required("order.manage_orders")
def fulfill_order_lines(request, order_pk):
orders = Order.objects.confirmed().prefetch_related("lines")
order = get_object_or_404(orders, pk=order_pk)
unfulfilled_lines = order.lines.filter(quantity_fulfilled__lt=F("quantity"))
status = 200
form = FulfillmentForm(request.POST or None, order=order, instance=Fulfillment())
FulfillmentLineFormSet = modelformset_factory(
FulfillmentLine,
form=FulfillmentLineForm,
extra=len(unfulfilled_lines),
formset=BaseFulfillmentLineFormSet,
)
initial = [
{"order_line": line, "quantity": line.quantity_unfulfilled}
for line in unfulfilled_lines
]
formset = FulfillmentLineFormSet(
request.POST or None, queryset=FulfillmentLine.objects.none(), initial=initial
)
all_line_forms_valid = all([line_form.is_valid() for line_form in formset])
if all_line_forms_valid and formset.is_valid() and form.is_valid():
forms_to_save = [
line_form
for line_form in formset
if line_form.cleaned_data.get("quantity") > 0
]
if forms_to_save:
fulfillment = form.save()
quantities = []
order_lines = []
quantity_fulfilled = 0
for line_form in forms_to_save:
line = line_form.save(commit=False)
line.fulfillment = fulfillment
line.save()
quantity = line_form.cleaned_data.get("quantity")
quantity_fulfilled += quantity
quantities.append(quantity)
order_lines.append(line)
# update to refresh prefetched lines quantity_fulfilled
order = orders.get(pk=order_pk)
update_order_status(order)
msg = npgettext_lazy(
"Dashboard message related to an order",
"Fulfilled %(quantity_fulfilled)d item",
"Fulfilled %(quantity_fulfilled)d items",
number="quantity_fulfilled",
) % {"quantity_fulfilled": quantity_fulfilled}
events.fulfillment_fulfilled_items_event(
order=order,
user=request.user,
fulfillment_lines=fulfillment.lines.all(),
)
if form.cleaned_data.get("send_mail"):
send_fulfillment_confirmation_to_customer(
order, fulfillment, request.user
)
else:
msg = pgettext_lazy(
"Dashboard message related to an order", "No items fulfilled"
)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
ctx = {
"form": form,
"formset": formset,
"order": order,
"unfulfilled_lines": unfulfilled_lines,
}
template = "dashboard/order/fulfillment.html"
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
@permission_required("order.manage_orders")
def cancel_fulfillment(request, order_pk, fulfillment_pk):
orders = Order.objects.confirmed().prefetch_related("fulfillments")
order = get_object_or_404(orders, pk=order_pk)
fulfillment = get_object_or_404(order.fulfillments, pk=fulfillment_pk)
status = 200
form = CancelFulfillmentForm(request.POST or None, fulfillment=fulfillment)
if form.is_valid():
msg = pgettext_lazy(
"Dashboard message", "Fulfillment #%(fulfillment)s canceled"
) % {"fulfillment": fulfillment.composed_id}
with transaction.atomic():
form.cancel_fulfillment(request.user)
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
ctx = {"form": form, "order": order, "fulfillment": fulfillment}
return TemplateResponse(
request, "dashboard/order/modal/cancel_fulfillment.html", ctx, status=status
)
@staff_member_required
@permission_required("order.manage_orders")
def change_fulfillment_tracking(request, order_pk, fulfillment_pk):
orders = Order.objects.confirmed().prefetch_related("fulfillments")
order = get_object_or_404(orders, pk=order_pk)
fulfillment = get_object_or_404(order.fulfillments, pk=fulfillment_pk)
status = 200
form = FulfillmentTrackingNumberForm(request.POST or None, instance=fulfillment)
if form.is_valid():
form.save()
events.fulfillment_tracking_updated_event(
order=order,
user=request.user,
tracking_number=request.POST.get("tracking_number"),
fulfillment=fulfillment,
)
if form.cleaned_data.get("send_mail"):
events.email_sent_event(
order=order,
email_type=events.OrderEventsEmails.TRACKING_UPDATED,
user=request.user,
)
send_fulfillment_update.delay(order.pk, fulfillment.pk)
msg = pgettext_lazy(
"Dashboard message", "Fulfillment #%(fulfillment)s tracking number updated"
) % {"fulfillment": fulfillment.composed_id}
messages.success(request, msg)
return redirect("dashboard:order-details", order_pk=order.pk)
elif form.errors:
status = 400
ctx = {"form": form, "order": order, "fulfillment": fulfillment}
return TemplateResponse(
request, "dashboard/order/modal/fulfillment_tracking.html", ctx, status=status
)
@staff_member_required
def ajax_order_shipping_methods_list(request, order_pk):
order = get_object_or_404(Order, pk=order_pk)
queryset = ShippingMethod.objects.prefetch_related("shipping_zone").order_by(
"name", "price"
)
if order.shipping_address:
country_code = order.shipping_address.country.code
queryset = queryset.filter(shipping_zone__countries__contains=country_code)
search_query = request.GET.get("q", "")
if search_query:
queryset = queryset.filter(
Q(name__icontains=search_query) | Q(price__icontains=search_query)
)
shipping_methods = [
{"id": method.pk, "text": method.get_ajax_label()} for method in queryset
]
return JsonResponse({"results": shipping_methods})
|
the-stack_0_12173 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSE loss.
Args:
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
"""
def __init__(self,
reduction='mean',
loss_weight=1.0,
loss_name='mse_loss',
):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
self.loss_name = loss_name
self.cls_criterion = nn.MSELoss(reduction=reduction)
def get_name(self):
return 'MSE_loss'
def forward(self,
cls_score,
reg_label,
**kwargs):
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
reg_label
)
return loss_cls
@LOSSES.register_module()
class CCCLoss(nn.Module):
"""CCC loss for VA regression
"""
def __init__(self,
reduction='mean',
loss_weight=1.0,
):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def get_name(self):
return 'CCC_loss'
def forward(self,
cls_score,
reg_label,
**kwargs):
x, y = cls_score, reg_label
vx = x - torch.mean(x)
vy = y - torch.mean(y)
rho = torch.sum(vx * vy) / ((torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2)))+1e-10)
x_m = torch.mean(x)
y_m = torch.mean(y)
x_s = torch.std(x)
y_s = torch.std(y)
ccc = 2 * rho * x_s * y_s / ((x_s ** 2 + y_s ** 2 + (x_m - y_m) ** 2)+1e-10)
loss = 1 - ccc
return loss * self.loss_weight
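# Usage sketch (comment added for illustration; not part of the original OpenMMLab
# file, and the 1-D tensor shapes below are only an assumption). The code above computes
#   ccc = 2*rho*s_x*s_y / (s_x**2 + s_y**2 + (m_x - m_y)**2)
# so the returned loss `1 - ccc` lies in [0, 2] and reaches 0 only when the predictions
# match the labels in correlation, scale and mean:
#   criterion = CCCLoss(loss_weight=1.0)
#   loss = criterion(torch.randn(8), torch.randn(8))  # scalar tensor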
|
the-stack_0_12177 | from typing import Optional
from sqlalchemy.exc import IntegrityError
from app.email_utils import (
get_email_domain_part,
send_cannot_create_directory_alias,
send_cannot_create_domain_alias,
email_belongs_to_alias_domains,
)
from app.errors import AliasInTrashError
from app.extensions import db
from app.log import LOG
from app.models import (
Alias,
CustomDomain,
Directory,
User,
DeletedAlias,
DomainDeletedAlias,
AliasMailbox,
)
def try_auto_create(address: str) -> Optional[Alias]:
"""Try to auto-create the alias using directory or catch-all domain
"""
alias = try_auto_create_catch_all_domain(address)
if not alias:
alias = try_auto_create_directory(address)
return alias
def try_auto_create_directory(address: str) -> Optional[Alias]:
"""
Try to create an alias with directory
"""
# check if alias belongs to a directory, ie having directory/anything@EMAIL_DOMAIN format
if email_belongs_to_alias_domains(address):
# if there's no directory separator in the alias, no way to auto-create it
if "/" not in address and "+" not in address and "#" not in address:
return None
# alias contains one of the 3 special directory separator: "/", "+" or "#"
if "/" in address:
sep = "/"
elif "+" in address:
sep = "+"
else:
sep = "#"
directory_name = address[: address.find(sep)]
LOG.d("directory_name %s", directory_name)
directory = Directory.get_by(name=directory_name)
if not directory:
return None
dir_user: User = directory.user
if not dir_user.can_create_new_alias():
send_cannot_create_directory_alias(dir_user, address, directory_name)
return None
try:
LOG.d("create alias %s for directory %s", address, directory)
mailboxes = directory.mailboxes
alias = Alias.create(
email=address,
user_id=directory.user_id,
directory_id=directory.id,
mailbox_id=mailboxes[0].id,
)
db.session.flush()
for i in range(1, len(mailboxes)):
AliasMailbox.create(
alias_id=alias.id, mailbox_id=mailboxes[i].id,
)
db.session.commit()
return alias
except AliasInTrashError:
LOG.warning(
"Alias %s was deleted before, cannot auto-create using directory %s, user %s",
address,
directory_name,
dir_user,
)
return None
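# Illustrative example (comment only, not in the original module): with a directory
# named "news" and an address whose domain is one of the platform's alias domains,
# "news/shop@<alias-domain>", "news+shop@<alias-domain>" and "news#shop@<alias-domain>"
# all resolve to the directory "news"; the alias is then auto-created against the
# directory's mailboxes, provided the directory owner can still create new aliases.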
def try_auto_create_catch_all_domain(address: str) -> Optional[Alias]:
"""Try to create an alias with catch-all domain"""
# try to create alias on-the-fly with custom-domain catch-all feature
# check if alias is custom-domain alias and if the custom-domain has catch-all enabled
alias_domain = get_email_domain_part(address)
custom_domain = CustomDomain.get_by(domain=alias_domain)
if not custom_domain:
return None
# custom_domain exists
if not custom_domain.catch_all:
return None
# custom_domain has catch-all enabled
domain_user: User = custom_domain.user
if not domain_user.can_create_new_alias():
send_cannot_create_domain_alias(domain_user, address, alias_domain)
return None
try:
LOG.d("create alias %s for domain %s", address, custom_domain)
alias = Alias.create(
email=address,
user_id=custom_domain.user_id,
custom_domain_id=custom_domain.id,
automatic_creation=True,
mailbox_id=domain_user.default_mailbox_id,
)
db.session.commit()
return alias
except AliasInTrashError:
LOG.warning(
"Alias %s was deleted before, cannot auto-create using domain catch-all %s, user %s",
address,
custom_domain,
domain_user,
)
return None
def delete_alias(alias: Alias, user: User):
Alias.delete(alias.id)
db.session.commit()
# save deleted alias to either global or domain trash
if alias.custom_domain_id:
try:
DomainDeletedAlias.create(
user_id=user.id, email=alias.email, domain_id=alias.custom_domain_id
)
db.session.commit()
except IntegrityError:
LOG.exception(
"alias %s domain %s has been added before to DeletedAlias",
alias.email,
alias.custom_domain_id,
)
db.session.rollback()
else:
try:
DeletedAlias.create(email=alias.email)
db.session.commit()
except IntegrityError:
LOG.exception("alias %s has been added before to DeletedAlias", alias.email)
db.session.rollback()
|
the-stack_0_12178 | from typing import List
from tdw.output_data import Environments as Envs
from magnebot.util import get_data
class Room:
"""
Data for a room in a scene.
"""
def __init__(self, env: Envs, i: int):
"""
:param env: The environments output data.
:param i: The index of this environment in env.get_num()
"""
""":field
The ID of the room.
"""
self.room_id: int = env.get_id(i)
""":field
The center of the room.
"""
self.center = env.get_center(i)
""":field
The bounds of the room.
"""
self.bounds = env.get_bounds(i)
""":field
Minimum x positional coordinate of the room.
"""
self.x_0: float = self.center[0] - (self.bounds[0] / 2)
""":field
Minimum z positional coordinate of the room.
"""
self.z_0: float = self.center[2] - (self.bounds[2] / 2)
""":field
Maximum x positional coordinate of the room.
"""
self.x_1: float = self.center[0] + (self.bounds[0] / 2)
""":field
Maximum z positional coordinate of the room.
"""
self.z_1: float = self.center[2] + (self.bounds[2] / 2)
def is_inside(self, x: float, z: float) -> bool:
"""
:param x: The x coordinate.
:param z: The z coordinate.
:return: True if position (x, z) is in the environment.
"""
return self.x_0 <= x <= self.x_1 and self.z_0 <= z <= self.z_1
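# Example (comment added for illustration): a room centered at (0, 0, 0) with bounds
# (4, 3, 6) spans x in [-2, 2] and z in [-3, 3], so is_inside(1.5, -2.0) returns True.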
class SceneEnvironment:
"""
Data for the scene environment and its rooms.
"""
def __init__(self, resp: List[bytes]):
"""
:param resp: The response from the build.
"""
env = get_data(resp=resp, d_type=Envs)
# Get the overall size of the scene.
""":field
Minimum x positional coordinate of the scene.
"""
self.x_min: float = 1000
""":field
Maximum x positional coordinate of the scene.
"""
self.x_max: float = 0
""":field
Minimum z positional coordinate of the scene.
"""
self.z_min: float = 1000
""":field
Maximum z positional coordinate of the scene.
"""
self.z_max: float = 0
""":field
All of the rooms in the scene.
"""
self.rooms: List[Room] = list()
for i in range(env.get_num()):
e = Room(env=env, i=i)
if e.x_0 < self.x_min:
self.x_min = e.x_0
if e.z_0 < self.z_min:
self.z_min = e.z_0
if e.x_1 > self.x_max:
self.x_max = e.x_1
if e.z_1 > self.z_max:
self.z_max = e.z_1
self.rooms.append(e)
|
the-stack_0_12179 | import logging
import random
import uuid
import os
import copy
from flask import Blueprint, jsonify, session, request, current_app
from datetime import datetime, timedelta
from decimal import Decimal
from sqlalchemy.sql.elements import Null
from app.models.model import Class, Student, StuCls, User, Log, Teacher, ClsWd
from app.utils.core import db
from sqlalchemy import or_, and_
from app.api.tree import Tree
from app.api.api_stu_cls import add_stu_cls, delete_stu_cls
from app.api.api_log import add_log
from app.utils.code import ResponseCode
from app.utils.response import ResMsg
from app.utils.util import route, Redis, CaptchaTool, PhoneTool
from app.utils.auth import Auth, login_required
from app.api.report import excel_write, word_write, pdf_write
from app.api.wx_login_or_register import get_access_code, get_wx_user_info, wx_login_or_register
from app.api.phone_login_or_register import SendSms, phone_login_or_register
from app.celery import add, flask_app_context
bp = Blueprint("student", __name__, url_prefix='/student/')
logger = logging.getLogger(__name__)
@route(bp, '/list', methods=["GET"])
@login_required
def student_list():
"""
Get the student list
:return:
"""
res = ResMsg()
# obj = request.get_json(force=True)
obj = request.args
name = obj.get("name") or None
phone = obj.get("phone") or None
type = obj.get("type") or None
page_index = int(obj.get("page"))
page_size = int(obj.get("count"))
filters = {
or_(Student.name == name, name == None),
or_(Student.phone == phone, phone == None),
or_(Student.type == type, type == None),
}
# current_app.logger.debug(db.session.query(Student).filter(*filters).order_by(Student.id).limit(page_size).offset((page_index-1)*page_size))
db_student = db.session.query(Student).filter(*filters).order_by(Student.id).limit(page_size).offset((page_index-1)*page_size).all()
total_count = db.session.query(Student).filter(*filters).count()
all_class = db.session.query(Class).all()
student_list = []
for stu in db_student:
class_id = []
n_stu_cls = db.session.query(StuCls).filter(StuCls.student_id == stu.id).all()
for nstu in n_stu_cls:
for cla in all_class:
if nstu.class_id == cla.id:
class_id.append(cla)
student_list.append({
'id': stu.id,
'name': stu.name,
'type': stu.type,
'phone': stu.phone,
'birthday': stu.birthday,
'age': stu.age,
'class_id': class_id,
'used_hour': stu.used_hour,
'left_hour': stu.left_hour,
'remark': stu.remark
})
data = {
"students": student_list,
"page": page_index,
"count": page_size,
"total": total_count
}
res.update(data=data)
return res.data
@route(bp, '/detail', methods=["GET"])
@login_required
def student_detail():
"""
Get a single student's details
:return:
"""
res = ResMsg()
obj = request.args
db_student = db.session.query(Student).filter(Student.id == obj['id']).first()
n_stu_cls = db.session.query(StuCls).filter(StuCls.student_id == obj['id']).all()
stu_cls_ids = []
for stucls in n_stu_cls:
stu_cls_ids.append(stucls.class_id)
n_class = db.session.query(Class).filter(Class.id.in_(stu_cls_ids)).all()
data = {
"detail": db_student,
"classes": n_class
}
res.update(data=data)
return res.data
@route(bp, '/add', methods=["POST"])
@login_required
def student_add():
"""
Add a new student record
:return:
"""
res = ResMsg()
obj = request.json
n_student = Student()
n_student.name = obj["name"]
n_student.phone = obj["phone"]
n_student.birthday = obj["birthday"] or None
n_student.age = obj["age"] or None
n_student.used_hour = obj["used_hour"] or None
n_student.left_hour = obj["left_hour"] or None
n_student.type = obj["type"] or None
n_student.remark = obj["remark"] or None
n_student.status = obj["status"] or None
n_student.create_time = datetime.now()
n_student.update_time = datetime.now()
user = db.session.query(User).filter(User.name == session["user_name"]).first()
try:
db.session.add(n_student)
db.session.flush()
# Add a log entry
add_log(1, user.id, n_student.id, None, None, '新增了学员信息')
if len(obj["classArr"]) > 0:
for o in obj["classArr"]:
add_stu_cls(n_student.id, o["id"])
db.session.commit()
except:
db.session.rollback()
return res.data
@route(bp, '/edit', methods=["POST"])
@login_required
def student_edit():
"""
Edit a student record
:return:
"""
res = ResMsg()
obj = request.json
# o_student = db.session.query(Student).filter(Student.id == obj["id"]).first()
n_student = db.session.query(Student).filter(Student.id == obj["id"]).first()
o_student = copy.deepcopy(n_student)
n_student.name = obj["name"]
n_student.phone = obj["phone"]
n_student.birthday = obj["birthday"] or None
n_student.age = obj["age"] or None
n_student.used_hour = obj["used_hour"] or None
n_student.left_hour = obj["left_hour"] or None
n_student.type = obj["type"] or None
n_student.remark = obj["remark"]
n_student.status = obj["status"] or None
n_student.update_time = datetime.now()
classIdArr = []
if len(obj["classArr"]) > 0:
for o in obj["classArr"]:
classIdArr.append(str(o["id"]))
stuClsIdArr = []
n_stu_cls = db.session.query(StuCls).filter(StuCls.student_id == n_student.id).all()
user = db.session.query(User).filter(User.name == session["user_name"]).first()
n_class = db.session.query(Class).all()
for stu in n_stu_cls:
stuClsIdArr.append(str(stu.class_id))
try:
# If the class hours were changed, add a log entry
if o_student.used_hour != n_student.used_hour or o_student.left_hour != n_student.left_hour:
add_log(3, user.id, n_student.id, None, None, '更改了课时,更改前已用课时 ' + str(o_student.used_hour) + ',剩余课时 ' + str(o_student.left_hour) + '; 更改后已用课时 ' + str(n_student.used_hour) + ',剩余课时 ' + str(n_student.left_hour))
elif o_student.remark != n_student.remark:
add_log(1, user.id, n_student.id, None, None, '将备注更改为: ' + n_student.remark)
else:
add_log(1, user.id, n_student.id, None, None, '更改了学员资料')
db.session.add(n_student)
db.session.commit()
if len(classIdArr) > 0:
for cid in classIdArr:
if cid not in stuClsIdArr:
add_stu_cls(n_student.id, cid)
# Add a log entry
class_name = ''
for nls in n_class:
if nls.id == cid:
class_name = nls.class_name
break
add_log(2, user.id, n_student.id, None, None, '将其添加到了班级:' + class_name + '中')
for ccid in stuClsIdArr:
if ccid not in classIdArr:
delete_stu_cls(n_student.id, ccid)
# Add a log entry
class_name = ''
for nls in n_class:
if nls.id == ccid:
class_name = nls.class_name
break
add_log(2, user.id, n_student.id, None, None, '将其添加从班级:' + class_name + '中移除')
except:
db.session.rollback()
return res.data
@route(bp, '/delete', methods=["POST"])
@login_required
def student_delete():
"""
Delete a student record
:return:
"""
res = ResMsg()
obj = request.json
n_student = db.session.query(Student).filter(Student.id == obj["id"]).first()
n_stu_cls = db.session.query(StuCls).filter(StuCls.student_id == obj["id"])
try:
db.session.delete(n_student)
n_stu_cls.delete(synchronize_session=False)
db.session.commit()
except:
db.session.rollback()
return res.data
@route(bp, '/logs', methods=["GET"])
@login_required
def log_list():
"""
Get the log list
:return:
"""
res = ResMsg()
obj = request.args
n_user = db.session.query(User).all()
n_teacher = db.session.query(Teacher).all()
n_student = db.session.query(Student).all()
n_class = db.session.query(Class).all()
n_log = db.session.query(Log).filter(Log.student_id == obj['sid']).order_by(Log.id.desc()).all()
dataList = []
if len(n_log) > 0:
for log in n_log:
operator_name = ''
teacher_name = ''
student_name = ''
class_name = ''
for user in n_user:
if user.id == log.operator_id:
operator_name = user.nick_name
for teacher in n_teacher:
if teacher.id == log.teacher_id:
teacher_name = teacher.name
for student in n_student:
if student.id == log.student_id:
student_name = student.name
for cls in n_class:
if cls.id == log.class_id:
class_name = cls.class_name
dataList.append({
'id': log.id,
'type': log.type,
'time': log.time,
'teacher_id': log.teacher_id,
'teacher_name': teacher_name,
'student_id': log.student_id,
'student_name': student_name,
'class_id': log.class_id,
'class_name': class_name,
'operator_id': log.operator_id,
'operator_name': operator_name,
'remark': log.remark
})
data = {
'dataList': dataList
}
res.update(data=data)
return res.data
@route(bp, '/course', methods=["GET"])
@login_required
def course_list():
"""
Get the course list
:return:
"""
res = ResMsg()
obj = request.args
stu_cls = db.session.query(StuCls).filter(StuCls.student_id == obj['sid']).all()
classid_arr = []
for sc in stu_cls:
classid_arr.append(sc.class_id)
cls_wd = db.session.query(ClsWd).filter(ClsWd.class_id.in_(classid_arr)).all()
n_class = db.session.query(Class).filter(Class.id.in_(classid_arr)).all()
weekSet = set([])
for sw in cls_wd:
weekSet.add(sw.weekday)
course_list = {}
for wd in weekSet:
week_cls = []
# Get the list of class ids for this weekday
for clswd in cls_wd:
if clswd.weekday == wd:
week_cls.append(clswd.class_id)
cls_arr = []
for wc in week_cls:
for nc in n_class:
if wc == nc.id:
cls_arr.append(nc)
course_list[wd] = cls_arr
res.update(data = {
'course_list': course_list
})
return res.data
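# Shape of the payload returned above (illustrative comment; the keys and values are
# made up): {"course_list": {1: [<Class Monday-A>, ...], 3: [<Class Wednesday-B>, ...]}},
# i.e. each weekday found in ClsWd maps to the Class rows this student attends that day.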
# ----------------- Native blueprint routes ---------------#
@bp.route('/logs', methods=["GET"])
def test_logger():
"""
Test the custom logger
:return:
"""
logger.info("this is info")
logger.debug("this is debug")
logger.warning("this is warning")
logger.error("this is error")
logger.critical("this is critical")
data = User.query.all()
return data
# return "ok"
@bp.route("/unifiedResponse", methods=["GET"])
def test_unified_response():
"""
Test the unified response message
:return:
"""
res = ResMsg()
test_dict = dict(name="zhang", age=18)
res.update(code=ResponseCode.Success, data=test_dict)
return jsonify(res.data)
# -------------- Routes using the custom blueprint wrapper --------------------#
@route(bp, '/packedResponse', methods=["GET"])
def test_packed_response():
"""
Test the response wrapper
:return:
"""
res = ResMsg()
test_dict = dict(name="zhang", age=18)
data = db.session.query(User).all()
# data = User.name.query.all()
logger.info(type(data))
# Just fill in the response status code here to get the matching response message
res.update(code=ResponseCode.Success, data=data)
# jsonify is no longer needed here; to customize headers or the HTTP response, see below
# return res.data,200,{"token":"111"}
return res.data
@route(bp, '/typeResponse', methods=["GET"])
def test_type_response():
"""
Test returning different data types
:return:
"""
res = ResMsg()
now = datetime.now()
date = datetime.now().date()
num = Decimal(11.11)
test_dict = dict(now=now, date=date, num=num)
# Just fill in the response status code here to get the matching response message
res.update(code=ResponseCode.Success, data=test_dict)
# jsonify is no longer needed here; to customize headers or the HTTP response, see below
# return res.data,200,{"token":"111"}
return res.data
# -------------- Redis wrapper tests --------------------#
@route(bp, '/testRedisWrite', methods=['GET'])
def test_redis_write():
"""
Test redis write
"""
# Write the value
Redis.write("test_key", "test_value", 60)
return "ok"
@route(bp, '/testRedisRead', methods=['GET'])
def test_redis_read():
"""
Test redis read
"""
data = Redis.read("test_key")
return data
# ----------------- Image captcha tests ---------------------------#
@route(bp, '/testGetCaptcha', methods=["GET"])
def test_get_captcha():
"""
Get an image captcha
:return:
"""
res = ResMsg()
new_captcha = CaptchaTool()
img, code = new_captcha.get_verify_code()
res.update(data=img)
session["code"] = code
return res.data
@route(bp, '/testVerifyCaptcha', methods=["POST"])
def test_verify_captcha():
"""
Verify the image captcha
:return:
"""
res = ResMsg()
obj = request.get_json(force=True)
code = obj.get('code', None)
s_code = session.get("code", None)
print(code, s_code)
if not all([code, s_code]):
res.update(code=ResponseCode.InvalidParameter)
return res.data
if code != s_code:
res.update(code=ResponseCode.VerificationCodeError)
return res.data
return res.data
# -------------------- JWT tests -----------------------------------------#
@route(bp, '/testLogin', methods=["POST"])
def test_login():
"""
On successful login, return a data-access token and a refresh token
:return:
"""
res = ResMsg()
obj = request.get_json(force=True)
user_name = obj.get("name")
# No request body received or the parameter is missing
if not obj or not user_name:
res.update(code=ResponseCode.InvalidParameter)
return res.data
if user_name == "qin":
# Generate a data-access token and a refresh token
access_token, refresh_token = Auth.encode_auth_token(user_id=user_name)
data = {"access_token": access_token.decode("utf-8"),
"refresh_token": refresh_token.decode("utf-8")
}
res.update(data=data)
return res.data
else:
res.update(code=ResponseCode.AccountOrPassWordErr)
return res.data
@route(bp, '/testGetData', methods=["GET"])
@login_required
def test_get_data():
"""
Test fetching data behind login protection
:return:
"""
res = ResMsg()
name = session.get("user_name")
data = "{},你好!!".format(name)
res.update(data=data)
return res.data
@route(bp, '/testRefreshToken', methods=["GET"])
def test_refresh_token():
"""
Refresh the token to get a new data-access token
:return:
"""
res = ResMsg()
refresh_token = request.args.get("refresh_token")
if not refresh_token:
res.update(code=ResponseCode.InvalidParameter)
return res.data
payload = Auth.decode_auth_token(refresh_token)
# Token has been tampered with or has expired
if not payload:
res.update(code=ResponseCode.PleaseSignIn)
return res.data
# Check the token's validity
if "user_id" not in payload:
res.update(code=ResponseCode.PleaseSignIn)
return res.data
# Get a new token
access_token = Auth.generate_access_token(user_id=payload["user_id"])
data = {"access_token": access_token.decode("utf-8"), "refresh_token": refresh_token}
res.update(data=data)
return res.data
# -------------------- Excel report output tests -------------------------------#
@route(bp, '/testExcel', methods=["GET"])
def test_excel():
"""
Test excel report output
:return:
"""
res = ResMsg()
report_path = current_app.config.get("REPORT_PATH", "./report")
file_name = "{}.xlsx".format(uuid.uuid4().hex)
path = os.path.join(report_path, file_name)
path = excel_write(path)
path = path.lstrip(".")
res.update(data=path)
return res.data
# -------------------- Word report output tests -------------------------------#
@route(bp, '/testWord', methods=["GET"])
def test_word():
"""
Test word report output
:return:
"""
res = ResMsg()
report_path = current_app.config.get("REPORT_PATH", "./report")
file_name = "{}.docx".format(uuid.uuid4().hex)
path = os.path.join(report_path, file_name)
path = word_write(path)
path = path.lstrip(".")
res.update(data=path)
return res.data
# -------------------- Unlimited-depth directory tree tests -------------------------------#
@route(bp, '/testTree', methods=["GET"])
def test_tree():
"""
Test the unlimited-depth directory tree
:return:
"""
res = ResMsg()
data = [
{"id": 1, "father_id": None, "name": "01"},
{"id": 2, "father_id": 1, "name": "0101"},
{"id": 3, "father_id": 1, "name": "0102"},
{"id": 4, "father_id": 1, "name": "0103"},
{"id": 5, "father_id": 2, "name": "010101"},
{"id": 6, "father_id": 2, "name": "010102"},
{"id": 7, "father_id": 2, "name": "010103"},
{"id": 8, "father_id": 3, "name": "010201"},
{"id": 9, "father_id": 4, "name": "010301"},
{"id": 10, "father_id": 9, "name": "01030101"},
{"id": 11, "father_id": 9, "name": "01030102"},
]
new_tree = Tree(data=data)
data = new_tree.build_tree()
res.update(data=data)
return res.data
# -------------------- WeChat login/registration tests -------------------------------#
@route(bp, '/testWXLoginOrRegister', methods=["GET"])
def test_wx_login_or_register():
"""
Test WeChat login or registration
:return:
"""
res = ResMsg()
code = request.args.get("code")
flag = request.args.get("flag")
# Invalid parameters
if code is None or flag is None:
res.update(code=ResponseCode.InvalidParameter)
return res.data
# Get the WeChat user authorization code
access_code = get_access_code(code=code, flag=flag)
if access_code is None:
res.update(code=ResponseCode.WeChatAuthorizationFailure)
return res.data
# Get the WeChat user info
wx_user_info = get_wx_user_info(access_data=access_code)
if wx_user_info is None:
res.update(code=ResponseCode.WeChatAuthorizationFailure)
return res.data
# Check whether this WeChat user already exists on this platform
data = wx_login_or_register(wx_user_info=wx_user_info)
if data is None:
res.update(code=ResponseCode.Fail)
return res.data
res.update(data=data)
return res.data
# -------------------- SMS verification code login/registration tests -------------------------------#
@route(bp, '/testGetVerificationCode', methods=["GET"])
def test_get_verification_code():
"""
Get an SMS verification code
:return:
"""
now = datetime.now()
res = ResMsg()
category = request.args.get("category", None)
# The category parameter takes one of the following values:
# authentication: identity verification
# login_confirmation: login confirmation
# login_exception: abnormal login
# user_registration: user registration
# change_password: change password
# information_change: information change
phone = request.args.get('phone', None)
# Validate the phone number
re_phone = PhoneTool.check_phone(phone)
if phone is None or re_phone is None:
res.update(code=ResponseCode.MobileNumberError)
return res.data
if category is None:
res.update(code=ResponseCode.InvalidParameter)
return res.data
try:
# Get the time the last verification code was issued
flag = Redis.hget(re_phone, 'expire_time')
if flag is not None:
flag = datetime.strptime(flag, '%Y-%m-%d %H:%M:%S')
# Guard against repeated requests
if (flag - now).total_seconds() < 60:
res.update(code=ResponseCode.FrequentOperation)
return res.data
# Generate a random verification code
code = "".join([str(random.randint(0, 9)) for _ in range(6)])
template_param = {"code": code}
# Send the verification code
sms = SendSms(phone=re_phone, category=category, template_param=template_param)
sms.send_sms()
# Store the code in redis for the verification step that follows
Redis.hset(re_phone, "code", code)
# Set the repeat-request barrier
Redis.hset(re_phone, "expire_time", (now + timedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S'))
# Set the verification code expiry time
Redis.expire(re_phone, 60 * 3)
return res.data
except Exception as e:
logger.exception(e)
res.update(code=ResponseCode.Fail)
return res.data
@route(bp, '/testPhoneLoginOrRegister', methods=["POST"])
def test_phone_login_or_register():
"""
Log in or register a user with an SMS verification code
:return:
"""
res = ResMsg()
obj = request.get_json(force=True)
phone = obj.get('account', None)
code = obj.get('code', None)
if phone is None or code is None:
res.update(code=ResponseCode.InvalidParameter)
return res.data
# Check that the phone number and verification code are correct
flag = PhoneTool.check_phone_code(phone, code)
if not flag:
res.update(code=ResponseCode.InvalidOrExpired)
return res.data
# Log in or register
data = phone_login_or_register(phone)
if data is None:
res.update(code=ResponseCode.Fail)
return res.data
res.update(data=data)
return res.data
# -------------------- PDF report output tests -------------------------------#
@route(bp, '/testPDF', methods=["GET"])
def test_pdf():
"""
Test pdf report output
:return:
"""
res = ResMsg()
report_path = current_app.config.get("REPORT_PATH", "./report")
file_name = "{}.pdf".format(uuid.uuid4().hex)
path = os.path.join(report_path, file_name)
path = pdf_write(path)
path = path.lstrip(".")
res.update(data=path)
return res.data
# -------------------- Celery tests -------------------------------#
@route(bp, '/testCeleryAdd', methods=["GET"])
def test_add():
"""
Test addition
:return:
"""
result = add.delay(1, 2)
return result.get(timeout=1)
@route(bp, '/testCeleryFlaskAppContext', methods=["GET"])
def test_flask_app_context():
"""
Test getting the Flask app context
:return:
"""
result = flask_app_context.delay()
return result.get(timeout=1)
|
the-stack_0_12184 | import logging
from datetime import timedelta
from typing import Dict
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.exceptions import (
NON_FIELD_ERRORS,
PermissionDenied,
ValidationError,
)
from django.forms.utils import ErrorList
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.views.generic import (
CreateView,
DetailView,
ListView,
UpdateView,
)
from django_filters.rest_framework import DjangoFilterBackend
from guardian.mixins import (
LoginRequiredMixin,
PermissionListMixin,
PermissionRequiredMixin as ObjectPermissionRequiredMixin,
)
from guardian.shortcuts import get_perms
from rest_framework.permissions import DjangoObjectPermissions
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework_guardian.filters import ObjectPermissionsFilter
from grandchallenge.algorithms.filters import AlgorithmFilter, JobViewsetFilter
from grandchallenge.algorithms.forms import (
AlgorithmForm,
AlgorithmImageForm,
AlgorithmImageUpdateForm,
AlgorithmPermissionRequestUpdateForm,
JobForm,
UsersForm,
ViewersForm,
)
from grandchallenge.algorithms.models import (
Algorithm,
AlgorithmImage,
AlgorithmPermissionRequest,
Job,
)
from grandchallenge.algorithms.serializers import (
AlgorithmImageSerializer,
AlgorithmSerializer,
HyperlinkedJobSerializer,
)
from grandchallenge.algorithms.tasks import create_algorithm_jobs_for_session
from grandchallenge.cases.forms import UploadRawImagesForm
from grandchallenge.cases.models import RawImageUploadSession
from grandchallenge.core.filters import FilterMixin
from grandchallenge.core.forms import UserFormKwargsMixin
from grandchallenge.core.permissions.mixins import UserIsNotAnonMixin
from grandchallenge.core.templatetags.random_encode import random_encode
from grandchallenge.core.views import PermissionRequestUpdate
from grandchallenge.credits.models import Credit
from grandchallenge.datatables.views import Column, PaginatedTableListView
from grandchallenge.groups.forms import EditorsForm
from grandchallenge.groups.views import UserGroupUpdateMixin
from grandchallenge.subdomains.utils import reverse
logger = logging.getLogger(__name__)
class AlgorithmCreate(
PermissionRequiredMixin, UserFormKwargsMixin, CreateView,
):
model = Algorithm
form_class = AlgorithmForm
permission_required = (
f"{Algorithm._meta.app_label}.add_{Algorithm._meta.model_name}"
)
def form_valid(self, form):
response = super().form_valid(form=form)
self.object.add_editor(self.request.user)
return response
class AlgorithmList(PermissionListMixin, FilterMixin, ListView):
model = Algorithm
permission_required = {
f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
}
ordering = "-created"
filter_class = AlgorithmFilter
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context.update(
{
"jumbotron_title": "Algorithms",
"jumbotron_description": format_html(
(
"We have made several machine learning algorithms "
"available that you can try out by uploading your "
"own anonymised medical imaging data. "
"Please <a href='{}'>contact us</a> if you would like "
"to make your own algorithm available here."
),
random_encode("mailto:[email protected]"),
),
}
)
return context
class AlgorithmDetail(ObjectPermissionRequiredMixin, DetailView):
model = Algorithm
permission_required = (
f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
)
raise_exception = True
def on_permission_check_fail(self, request, response, obj=None):
response = self.get(request)
return response
def check_permissions(self, request):
"""
Checks if *request.user* has all permissions returned by
*get_required_permissions* method.
:param request: Original request.
"""
try:
return super().check_permissions(request)
except PermissionDenied:
return HttpResponseRedirect(
reverse(
"algorithms:permission-request-create",
kwargs={"slug": self.object.slug},
)
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
form = UsersForm()
form.fields["action"].initial = UsersForm.REMOVE
editor_remove_form = EditorsForm()
editor_remove_form.fields["action"].initial = EditorsForm.REMOVE
context.update(
{"form": form, "editor_remove_form": editor_remove_form}
)
pending_permission_requests = AlgorithmPermissionRequest.objects.filter(
algorithm=context["object"],
status=AlgorithmPermissionRequest.PENDING,
).count()
context.update(
{"pending_permission_requests": pending_permission_requests}
)
context.update(
{
"average_job_duration": Job.objects.filter(
algorithm_image__algorithm=context["object"],
status=Job.SUCCESS,
).average_duration()
}
)
return context
class AlgorithmUpdate(
UserFormKwargsMixin,
LoginRequiredMixin,
ObjectPermissionRequiredMixin,
UpdateView,
):
model = Algorithm
form_class = AlgorithmForm
permission_required = (
f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
)
raise_exception = True
class AlgorithmUserGroupUpdateMixin(UserGroupUpdateMixin):
template_name = "algorithms/user_groups_form.html"
permission_required = (
f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
)
@property
def obj(self):
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
class JobUserGroupUpdateMixin(UserGroupUpdateMixin):
template_name = "algorithms/user_groups_form.html"
permission_required = (
f"{Job._meta.app_label}.change_{Job._meta.model_name}"
)
@property
def obj(self):
return get_object_or_404(Job, pk=self.kwargs["pk"])
class EditorsUpdate(AlgorithmUserGroupUpdateMixin):
form_class = EditorsForm
success_message = "Editors successfully updated"
class UsersUpdate(AlgorithmUserGroupUpdateMixin):
form_class = UsersForm
success_message = "Users successfully updated"
class JobViewersUpdate(JobUserGroupUpdateMixin):
form_class = ViewersForm
def get_success_message(self, cleaned_data):
return format_html(
(
"Viewers for {} successfully updated. <br>"
"They will be able to see the job by visiting {}"
),
self.obj,
self.obj.get_absolute_url(),
)
class AlgorithmImageCreate(
UserFormKwargsMixin,
LoginRequiredMixin,
ObjectPermissionRequiredMixin,
CreateView,
):
model = AlgorithmImage
form_class = AlgorithmImageForm
permission_required = (
f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
)
raise_exception = True
@property
def algorithm(self):
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
def get_permission_object(self):
return self.algorithm
def form_valid(self, form):
form.instance.creator = self.request.user
form.instance.algorithm = self.algorithm
uploaded_file = form.cleaned_data["chunked_upload"][0]
form.instance.staged_image_uuid = uploaded_file.uuid
return super().form_valid(form)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context.update({"algorithm": self.algorithm})
return context
class AlgorithmImageDetail(
LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
):
model = AlgorithmImage
permission_required = f"{AlgorithmImage._meta.app_label}.view_{AlgorithmImage._meta.model_name}"
raise_exception = True
class AlgorithmImageUpdate(
LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView
):
model = AlgorithmImage
form_class = AlgorithmImageUpdateForm
permission_required = f"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}"
raise_exception = True
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context.update({"algorithm": self.object.algorithm})
return context
class AlgorithmExecutionSessionCreate(
UserFormKwargsMixin,
LoginRequiredMixin,
ObjectPermissionRequiredMixin,
CreateView,
):
model = RawImageUploadSession
form_class = UploadRawImagesForm
template_name = "algorithms/algorithm_execution_session_create.html"
permission_required = (
f"{Algorithm._meta.app_label}.execute_{Algorithm._meta.model_name}"
)
raise_exception = True
@property
def algorithm(self) -> Algorithm:
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update(
{
"linked_task": create_algorithm_jobs_for_session.signature(
kwargs={
"algorithm_image_pk": self.algorithm.latest_ready_image.pk,
},
immutable=True,
)
}
)
return kwargs
def get_permission_object(self):
return self.algorithm
def get_initial(self):
if self.algorithm.latest_ready_image is None:
raise Http404()
return super().get_initial()
def form_valid(self, form):
form.instance.creator = self.request.user
return super().form_valid(form)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context.update({"algorithm": self.algorithm})
context.update(
self.get_remaining_jobs(
credits_per_job=self.algorithm.credits_per_job
)
)
return context
def get_success_url(self):
return reverse(
"algorithms:execution-session-detail",
kwargs={"slug": self.kwargs["slug"], "pk": self.object.pk},
)
def get_remaining_jobs(self, *, credits_per_job: int,) -> Dict:
"""
Determines the number of jobs left for the user and when the next job can be started
:return: A dictionary containing remaining_jobs (int) and
next_job_at (datetime)
"""
now = timezone.now()
period = timedelta(days=30)
user_credit = Credit.objects.get(user=self.request.user)
if credits_per_job == 0:
return {
"remaining_jobs": 1,
"next_job_at": now,
"user_credits": user_credit.credits,
}
jobs = Job.credits_set.spent_credits(user=self.request.user)
if jobs["oldest"]:
next_job_at = jobs["oldest"] + period
else:
next_job_at = now
if jobs["total"]:
total_jobs = user_credit.credits - jobs["total"]
else:
total_jobs = user_credit.credits
return {
"remaining_jobs": int(total_jobs / max(credits_per_job, 1)),
"next_job_at": next_job_at,
"user_credits": total_jobs,
}
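# Worked example for the arithmetic above (comment only; the numbers are hypothetical):
# with user_credit.credits == 1000, 400 credits spent over the last 30 days and
# credits_per_job == 100, total_jobs == 600 and remaining_jobs == 6; next_job_at is
# 30 days after the oldest job counted in jobs["total"].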
class AlgorithmExecutionSessionDetail(
LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
):
model = RawImageUploadSession
template_name = "algorithms/executionsession_detail.html"
permission_required = "cases.view_rawimageuploadsession"
raise_exception = True
@cached_property
def algorithm(self):
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context.update(
{
"algorithm": self.algorithm,
"average_job_duration": Job.objects.filter(
algorithm_image__algorithm=self.algorithm,
status=Job.SUCCESS,
).average_duration(),
}
)
return context
class JobsList(PermissionListMixin, PaginatedTableListView):
model = Job
permission_required = f"{Job._meta.app_label}.view_{Job._meta.model_name}"
row_template = "algorithms/job_list_row.html"
search_fields = [
"pk",
"creator__username",
"inputs__image__name",
"inputs__image__files__file",
"comment",
]
columns = [
Column(title="Details", sort_field="pk"),
Column(title="Created", sort_field="created"),
Column(title="Creator", sort_field="creator__username"),
Column(title="Result", sort_field="inputs__image__name"),
Column(title="Comment", sort_field="comment"),
Column(title="Visibility", sort_field="public"),
Column(title="Viewer", sort_field="inputs__image__files__file"),
]
default_sort_column = 1
@cached_property
def algorithm(self):
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
def get_queryset(self):
queryset = super().get_queryset()
return (
queryset.filter(algorithm_image__algorithm=self.algorithm,)
.prefetch_related(
"outputs__image__files",
"outputs__interface",
"inputs__image__files",
"viewers__user_set",
)
.select_related(
"creator__user_profile",
"creator__verification",
"algorithm_image__algorithm",
)
)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context.update({"algorithm": self.algorithm})
return context
class JobDetail(ObjectPermissionRequiredMixin, DetailView):
permission_required = f"{Job._meta.app_label}.view_{Job._meta.model_name}"
raise_exception = True
queryset = (
Job.objects.with_duration()
.prefetch_related(
"outputs__image__files",
"outputs__interface",
"inputs__image__files",
"viewers__user_set__user_profile",
"viewers__user_set__verification",
"viewer_groups",
)
.select_related(
"creator__user_profile",
"creator__verification",
"algorithm_image__algorithm__workstation",
)
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
viewers_form = ViewersForm()
viewers_form.fields["action"].initial = ViewersForm.REMOVE
context.update(
{
"viewers_form": viewers_form,
"job_perms": get_perms(self.request.user, self.object),
"algorithm_perms": get_perms(
self.request.user, self.object.algorithm_image.algorithm
),
}
)
return context
class JobUpdate(LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView):
model = Job
form_class = JobForm
permission_required = (
f"{Job._meta.app_label}.change_{Job._meta.model_name}"
)
raise_exception = True
class AlgorithmViewSet(ReadOnlyModelViewSet):
queryset = Algorithm.objects.all()
serializer_class = AlgorithmSerializer
permission_classes = [DjangoObjectPermissions]
filter_backends = [DjangoFilterBackend, ObjectPermissionsFilter]
filterset_fields = ["slug"]
class AlgorithmImageViewSet(ReadOnlyModelViewSet):
queryset = AlgorithmImage.objects.all()
serializer_class = AlgorithmImageSerializer
permission_classes = [DjangoObjectPermissions]
filter_backends = [DjangoFilterBackend, ObjectPermissionsFilter]
filterset_fields = ["algorithm"]
class JobViewSet(ReadOnlyModelViewSet):
queryset = (
Job.objects.all()
.prefetch_related("outputs__interface", "inputs__interface")
.select_related("algorithm_image__algorithm")
)
serializer_class = HyperlinkedJobSerializer
permission_classes = [DjangoObjectPermissions]
filter_backends = [DjangoFilterBackend, ObjectPermissionsFilter]
filterset_class = JobViewsetFilter
class AlgorithmPermissionRequestCreate(
UserIsNotAnonMixin, SuccessMessageMixin, CreateView
):
model = AlgorithmPermissionRequest
fields = ()
@property
def algorithm(self):
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
def get_success_url(self):
return self.algorithm.get_absolute_url()
def get_success_message(self, cleaned_data):
return self.object.status_to_string()
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.algorithm = self.algorithm
try:
redirect = super().form_valid(form)
return redirect
except ValidationError as e:
form._errors[NON_FIELD_ERRORS] = ErrorList(e.messages)
return super().form_invalid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
permission_request = AlgorithmPermissionRequest.objects.filter(
algorithm=self.algorithm, user=self.request.user
).first()
context.update(
{
"permission_request": permission_request,
"algorithm": self.algorithm,
}
)
return context
class AlgorithmPermissionRequestList(ObjectPermissionRequiredMixin, ListView):
model = AlgorithmPermissionRequest
permission_required = (
f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
)
raise_exception = True
@property
def algorithm(self):
return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
def get_permission_object(self):
return self.algorithm
def get_queryset(self):
queryset = super().get_queryset()
queryset = (
queryset.filter(algorithm=self.algorithm)
.exclude(status=AlgorithmPermissionRequest.ACCEPTED)
.select_related("user__user_profile", "user__verification")
)
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({"algorithm": self.algorithm})
return context
class AlgorithmPermissionRequestUpdate(PermissionRequestUpdate):
model = AlgorithmPermissionRequest
form_class = AlgorithmPermissionRequestUpdateForm
base_model = Algorithm
redirect_namespace = "algorithms"
permission_required = (
f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({"algorithm": self.base_object})
return context
|
the-stack_0_12186 | #@mrlokaman
#@lntechnical
from pyrogram import Client, filters
import requests
import json
import os
TOKEN = os.environ.get("TOKEN", "")
API_ID = int(os.environ.get("API_ID",12345))
API_HASH = os.environ.get("API_HASH","")
BITLY_TOKEN = os.environ.get("BITLY_TOKEN","")
headers = {
'Authorization': BITLY_TOKEN,
'Content-Type': 'application/json',
}
app = Client("bitlybot" ,bot_token = TOKEN ,api_id = API_ID ,api_hash = API_HASH )
@app.on_message(filters.private & filters.command(['start']))
async def start(client,message):
await message.reply_text(f"Hello {message .from_user.first_name}\nhello i am bit.ly short link genrator\n made with love by @mrlokaman ", reply_to_message_id = message.message_id)
@app.on_message(filters.private & filters.regex("http|https"))
async def Bitly(client,message):
URL = message.text
DOMAIN = "sh.st"
value = {'long_url': URL , 'domain': DOMAIN}
data = json.dumps(value)
try:
r = requests.post('https://api.shorte.st/v1/data/url', headers=headers,data = data )
result = r.json()
link = result["link"]
await message.reply_text(f"```{link}```", reply_to_message_id= message.message_id)
except Exception as e :
await message.reply_text(e)
app.run()
|
the-stack_0_12189 | from __future__ import unicode_literals
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
try:
import pytz
except ImportError:
pytz = None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just a single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT, which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
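# Example of the limits above (comment added for illustration): inserting objects with
# 3 fields allows 999 // 3 == 333 rows per batch, while a single-field insert is capped
# at 500 rows by SQLITE_MAX_COMPOUND_SELECT.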
def check_expression_support(self, expression):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
if isinstance(output_field, bad_fields):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev, and Variance '
'aggregations on date/time fields in sqlite3 '
'since date/time is saved as text.'
)
except FieldError:
# Not every subexpression has an output_field which is fine
# to ignore.
pass
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return "'%s'" % duration_string(timedelta), []
def format_for_duration_arithmetic(self, sql):
"""Do nothing here, we will handle it in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def _require_pytz(self):
if settings.USE_TZ and pytz is None:
raise ImproperlyConfigured("This query requires pytz, but it isn't installed.")
def datetime_cast_date_sql(self, field_name, tzname):
self._require_pytz()
return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
self._require_pytz()
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
self._require_pytz()
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def time_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_time_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def _quote_params_for_last_executed_query(self, params):
"""
Only for last_executed_query! Don't use this to execute SQL queries!
"""
# This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
# number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
# number of return values, default = 2000). Since Python's sqlite3
# module doesn't expose the get_limit() C API, assume the default
# limits are in effect and split the work in batches if needed.
BATCH_SIZE = 999
if len(params) > BATCH_SIZE:
results = ()
for index in range(0, len(params), BATCH_SIZE):
chunk = params[index:index + BATCH_SIZE]
results += self._quote_params_for_last_executed_query(chunk)
return results
sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
# Bypass Django's wrappers and use the underlying sqlite3 connection
# to avoid logging this query - it would trigger infinite recursion.
cursor = self.connection.connection.cursor()
# Native sqlite3 cursors cannot be used as context managers.
try:
return cursor.execute(sql, params).fetchone()
finally:
cursor.close()
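# Sketch of what the helper above produces (comment only): for params ("a", 1) it runs
# SELECT QUOTE(?), QUOTE(?) on the raw sqlite3 connection and returns the quoted
# literals, e.g. ("'a'", "1"), which last_executed_query() then interpolates into the SQL.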
def last_executed_query(self, cursor, sql, params):
# Python substitutes parameters in Modules/_sqlite/cursor.c with:
# pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
# Unfortunately there is no way to reach self->statement from Python,
# so we quote and substitute parameters manually.
if params:
if isinstance(params, (list, tuple)):
params = self._quote_params_for_last_executed_query(params)
else:
keys = params.keys()
values = tuple(params.values())
values = self._quote_params_for_last_executed_query(values)
params = dict(zip(keys, values))
return sql % params
# For consistency with SQLiteCursorWrapper.execute(), just return sql
# when there are no parameters. See #13648 and #17158.
else:
return sql
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def adapt_datetimefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'DecimalField':
converters.append(self.convert_decimalfield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def convert_decimalfield_value(self, value, expression, connection, context):
if value is not None:
value = expression.output_field.format_number(value)
value = backend_utils.typecast_decimal(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def bulk_insert_sql(self, fields, placeholder_rows):
return " UNION ALL ".join(
"SELECT %s" % ", ".join(row)
for row in placeholder_rows
)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
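# For example (illustrative comment): combine_expression('^', ['"price"', '2']) yields
# django_power("price",2), which the user-defined django_power() registered in connect()
# evaluates at query time.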
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ['+', '-']:
raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError('Too many params for timedelta operations.')
return "django_format_dtdelta(%s)" % ', '.join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
|
the-stack_0_12190 | from __future__ import unicode_literals
from django.core import serializers
from django.db import connection
from django.test import TestCase
from .models import Child, FKDataNaturalKey, NaturalKeyAnchor
from .tests import register_tests
class NaturalKeySerializerTests(TestCase):
pass
def natural_key_serializer_test(format, self):
# Create all the objects defined in the test data
with connection.constraint_checks_disabled():
objects = [
NaturalKeyAnchor.objects.create(id=1100, data="Natural Key Anghor"),
FKDataNaturalKey.objects.create(id=1101, data_id=1100),
FKDataNaturalKey.objects.create(id=1102, data_id=None),
]
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2, use_natural_foreign_keys=True)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for obj in objects:
instance = obj.__class__.objects.get(id=obj.pk)
self.assertEqual(
obj.data, instance.data,
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
obj.pk, obj.data, type(obj.data), instance, type(instance.data),
)
)
def natural_key_test(format, self):
book1 = {
'data': '978-1590597255',
'title': 'The Definitive Guide to Django: Web Development Done Right',
}
book2 = {'data': '978-1590599969', 'title': 'Practical Django Projects'}
# Create the books.
adrian = NaturalKeyAnchor.objects.create(**book1)
james = NaturalKeyAnchor.objects.create(**book2)
# Serialize the books.
string_data = serializers.serialize(
format, NaturalKeyAnchor.objects.all(), indent=2,
use_natural_foreign_keys=True, use_natural_primary_keys=True,
)
# Delete one book (to prove that the natural key generation will only
# restore the primary keys of books found in the database via the
# get_natural_key manager method).
james.delete()
# Deserialize and test.
books = list(serializers.deserialize(format, string_data))
self.assertEqual(len(books), 2)
self.assertEqual(books[0].object.title, book1['title'])
self.assertEqual(books[0].object.pk, adrian.pk)
self.assertEqual(books[1].object.title, book2['title'])
self.assertIsNone(books[1].object.pk)
def natural_pk_mti_test(format, self):
"""
If serializing objects in a multi-table inheritance relationship using
natural primary keys, the natural foreign key for the parent is output in
the fields of the child so it's possible to relate the child to the parent
when deserializing.
"""
child_1 = Child.objects.create(parent_data='1', child_data='1')
child_2 = Child.objects.create(parent_data='2', child_data='2')
string_data = serializers.serialize(
format,
[child_1.parent_ptr, child_2.parent_ptr, child_2, child_1],
use_natural_foreign_keys=True, use_natural_primary_keys=True,
)
child_1.delete()
child_2.delete()
for obj in serializers.deserialize(format, string_data):
obj.save()
children = Child.objects.all()
self.assertEqual(len(children), 2)
for child in children:
# If it's possible to find the superclass from the subclass and it's
# the correct superclass, it's working.
self.assertEqual(child.child_data, child.parent_data)
# Dynamically register tests for each serializer
register_tests(NaturalKeySerializerTests, 'test_%s_natural_key_serializer', natural_key_serializer_test)
register_tests(NaturalKeySerializerTests, 'test_%s_serializer_natural_keys', natural_key_test)
register_tests(NaturalKeySerializerTests, 'test_%s_serializer_natural_pks_mti', natural_pk_mti_test)
|
the-stack_0_12194 | #!/usr/bin/env python
import json
import pika
import requests
import time
import websocket
import servitor_utils
settings = servitor_utils.make_settings("settings.yml")
def send_ws_message(settings, message):
websocket_server = settings['websocket_local_server']
websocket_port = settings['websocket_local_port']
websocket_uri = "ws://{}:{}".format(websocket_server, websocket_port)
ws = websocket.create_connection(websocket_uri)
ws.send(message)
ws.recv()
ws.close()
def callback(ch, method, properties, body):
amqp_payload = json.loads(body)
message_payload = {"topic": method.routing_key,
"message": amqp_payload}
send_ws_message(settings=settings,
message=json.dumps(message_payload, ensure_ascii=False))
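# For reference (comment added; the body layout is an assumption based on the code
# above): a message published to e.g. "topic.twitch.follows" is forwarded to the
# websocket as {"topic": "topic.twitch.follows", "message": <original AMQP JSON body>}.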
if __name__ == "__main__":
amqp_connection_params = pika.ConnectionParameters(host=settings['amqp_server'])
connection = pika.BlockingConnection(amqp_connection_params)
channel = connection.channel()
login_message = {"type": "status",
"message": "rabbitmq-receiver online"}
send_ws_message(settings=settings,
message=json.dumps(login_message, ensure_ascii=False))
channel.exchange_declare(exchange=settings['amqp_exchange'],
exchange_type="topic")
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
binding_keys = ["topic.twitch.follows", "topic.twitch.subscriptions", "topic.twitch.hosts"]
for binding_key in binding_keys:
channel.queue_bind(exchange=settings['amqp_exchange'],
queue=queue_name,
routing_key=binding_key)
queue_subscribe_message = {"topic": "status",
"message": "rabbitmq-receiver subscribed to {}".format(binding_key)}
send_ws_message(settings=settings,
message=json.dumps(queue_subscribe_message, ensure_ascii=False))
channel.basic_consume(callback, queue=queue_name,
no_ack=True)
channel.start_consuming()
|
the-stack_0_12196 | #!/usr/bin/env python
from django.test import TestCase
from nose.tools import assert_false, assert_true
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import apply_leniency
from corehq.apps.sms.util import (
ContactNotFoundException,
clean_phone_number,
get_contact,
is_contact_active,
is_superuser_or_contractor,
)
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.util.test_utils import create_test_case, flag_enabled
class UtilTestCase(TestCase):
def setUp(self):
self.domain = 'test-domain'
self.user = CommCareUser.create(self.domain, 'test-user', '123', None, None)
def tearDown(self):
self.user.delete()
def testCleanPhoneNumber(self):
phone_number = " 324 23-23421241"
cleaned = clean_phone_number(phone_number)
self.assertEqual(cleaned, "+3242323421241")
@run_with_all_backends
def test_get_contact_for_case(self):
with create_test_case(self.domain, 'contact', 'test-case') as case:
contact = get_contact(self.domain, case.case_id)
self.assertEqual(contact.case_id, case.case_id)
self.assertTrue(is_commcarecase(contact))
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain + 'x', case.case_id)
def test_get_contact_for_user(self):
contact = get_contact(self.domain, self.user.get_id)
self.assertEqual(contact.get_id, self.user.get_id)
self.assertTrue(isinstance(contact, CommCareUser))
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain + 'x', self.user.get_id)
def test_contact_not_found(self):
with self.assertRaises(ContactNotFoundException):
get_contact(self.domain, 'this-id-should-not-be-found')
@run_with_all_backends
def test_is_contact_active_for_case(self):
with create_test_case(self.domain, 'contact', 'test-case') as case:
self.assertTrue(is_contact_active(self.domain, 'CommCareCase', case.case_id))
update_case(self.domain, case.case_id, close=True)
self.assertFalse(is_contact_active(self.domain, 'CommCareCase', case.case_id))
def test_is_contact_active_for_user(self):
self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
self.user.is_active = False
self.user.save()
self.assertFalse(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
self.user.is_active = True
self.user.save()
self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
def test_apply_leniency(self):
self.assertEqual('16175551234', apply_leniency(' 1 (617) 555-1234 '))
self.assertEqual('16175551234', apply_leniency(' 1.617.555.1234 '))
self.assertEqual('16175551234', apply_leniency(' +1 617 555 1234 '))
def test_contractor():
user = CouchUser(username="eric")
with flag_enabled('IS_CONTRACTOR'):
assert_true(is_superuser_or_contractor(user))
def test_superuser():
user = CouchUser(username="john", is_superuser=True)
assert_true(is_superuser_or_contractor(user))
def test_normal_user():
user = CouchUser(username="michael")
assert_false(is_superuser_or_contractor(user))
|
the-stack_0_12197 | from pandac.PandaModules import NodePath, Plane, Vec3, Point3
from pandac.PandaModules import CollisionPlane, CollisionNode
from direct.showbase.RandomNumGen import RandomNumGen
from direct.showbase.DirectObject import DirectObject
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import bound as clamp
from . import CogdoUtil
from . import CogdoFlyingGameGlobals as Globals
from .CogdoFlyingLevelQuadrant import CogdoFlyingLevelQuadrant
from .CogdoFlyingObjects import CogdoFlyingGatherableFactory, CogdoFlyingPlatform, CogdoFlyingLevelFog
from .CogdoFlyingObstacles import CogdoFlyingObtacleFactory
from .CogdoGameExit import CogdoGameExit
from otp.otpbase import OTPGlobals
class CogdoFlyingLevel(DirectObject):
notify = directNotify.newCategory('CogdoFlyingLevel')
def __init__(self, parent, frameModel, startPlatformModel, endPlatformModel, quadLengthUnits, quadVisibilityAhead, quadVisibiltyBehind):
self.parent = parent
self.quadLengthUnits = quadLengthUnits
self._halfQuadLengthUnits = quadLengthUnits / 2.0
self.quadVisibiltyAhead = quadVisibilityAhead
self.quadVisibiltyBehind = quadVisibiltyBehind
self._frameModel = frameModel
self.root = NodePath('CogdoFlyingLevel')
self.quadrantRoot = NodePath('QuadrantsRoot')
self.quadrantRoot.reparentTo(self.root)
self._startPlatformModel = startPlatformModel
self._startPlatformModel.reparentTo(self.root)
self._startPlatformModel.setZ(Globals.Level.StartPlatformHeight)
self._endPlatformModel = endPlatformModel
self._endPlatformModel.reparentTo(self.root)
self._endPlatformModel.setZ(Globals.Level.EndPlatformHeight)
self.wallR = self._frameModel.find('**/wallR')
self.wallL = self._frameModel.find('**/wallL')
self._exit = CogdoGameExit()
self._exit.reparentTo(self._endPlatformModel)
loc = self._endPlatformModel.find('**/exit_loc')
offset = loc.getPos(render)
self._exit.setPos(render, offset)
self.quadrants = []
self.visibleQuadIndices = []
self._numQuads = 0
self._currentQuadNum = -1
self._camera = None
self._initCollisions()
self.upLimit = self._frameModel.find('**/limit_up').getZ(render)
self.downLimit = self._frameModel.find('**/limit_down').getZ(render)
self.leftLimit = self._frameModel.find('**/limit_left').getX(render) - 30.0
self.rightLimit = self._frameModel.find('**/limit_right').getX(render) + 30.0
self.backLimit = -self.quadLengthUnits
self.forwardLimit = self.quadLengthUnits * 20
self._frameModel.flattenStrong()
self.gatherableFactory = CogdoFlyingGatherableFactory()
self.obstacleFactory = CogdoFlyingObtacleFactory()
return
def getExit(self):
return self._exit
def getBounds(self):
return ((self.leftLimit, self.rightLimit), (self.backLimit, self.forwardLimit), (self.downLimit, self.upLimit))
def getGatherable(self, serialNum):
for quadrant in self.quadrants:
for gatherable in quadrant.gatherables:
if gatherable.serialNum == serialNum:
return gatherable
return None
def ready(self):
self.gatherableFactory.destroy()
del self.gatherableFactory
self.obstacleFactory.destroy()
del self.obstacleFactory
self._initStartEndPlatforms()
self._frameModel.reparentTo(self.root)
self.root.reparentTo(self.parent)
self.root.stash()
def _initStartEndPlatforms(self):
self.startPlatform = CogdoFlyingPlatform(self._startPlatformModel, Globals.Level.PlatformTypes.StartPlatform)
self.endPlatform = CogdoFlyingPlatform(self._endPlatformModel, Globals.Level.PlatformTypes.EndPlatform)
self._endPlatformModel.setY(self.convertQuadNumToY(self._numQuads))
self.backLimit = self._startPlatformModel.getY(render) - Globals.Level.StartPlatformLength * 0.7
self.forwardLimit = self._endPlatformModel.getY(render) + Globals.Level.EndPlatformLength * 0.7
def _initCollisions(self):
self.collPlane = CollisionPlane(Plane(Vec3(0, 0, 1.0), Point3(0, 0, 10)))
self.collPlane.setTangible(0)
self.collNode = CollisionNode('fogPlane')
self.collNode.setIntoCollideMask(OTPGlobals.FloorBitmask)
self.collNode.addSolid(self.collPlane)
self.collNodePath = self.root.attachNewNode(self.collNode)
self.collNodePath.hide()
def destroy(self):
del self.collPlane
self.collNodePath.removeNode()
del self.collNodePath
del self.collNode
for quadrant in self.quadrants:
quadrant.destroy()
self._exit.destroy()
del self._exit
self.root.removeNode()
del self.root
def onstage(self):
self.root.unstash()
self.update(0.0)
def offstage(self):
self.root.stash()
def start(self, startTime = 0.0):
self._startTime = startTime
def stop(self):
pass
def getLength(self):
return self.quadLengthUnits * self.getNumQuadrants()
def appendQuadrant(self, model):
quadrant = CogdoFlyingLevelQuadrant(self._numQuads, model, self, self.root)
if self._numQuads == 0:
quadrant.generateGatherables(self._startPlatformModel)
quadrant.offstage()
self.quadrants.append(quadrant)
self._numQuads = len(self.quadrants)
def getNumQuadrants(self):
return self._numQuads
def setCamera(self, camera):
self._camera = camera
def getCameraActualQuadrant(self):
camY = self._camera.getY(render)
y = self.root.getY(render)
return self.convertYToQuadNum(camY - y)
def update(self, dt = 0.0):
if self._camera is None:
return
quadNum = clamp(self.getCameraActualQuadrant(), 0, self._numQuads - 1)
if quadNum < self._numQuads:
self.quadrants[quadNum].update(dt)
if quadNum + 1 < self._numQuads:
self.quadrants[quadNum + 1].update(dt)
if quadNum != self._currentQuadNum:
self._switchToQuadrant(quadNum)
return
def _switchToQuadrant(self, quadNum):
self.visibleQuadIndices = []
if quadNum >= 0:
if quadNum > 0:
self.quadrants[max(quadNum - self.quadVisibiltyBehind, 0)].onstage()
for i in range(quadNum, min(quadNum + self.quadVisibiltyAhead + 1, self._numQuads)):
self.quadrants[i].onstage()
self.visibleQuadIndices.append(i)
if i == 0:
self.startPlatform.onstage()
elif i == self._numQuads - 1:
self.endPlatform.onstage()
self._currentQuadNum = quadNum
for i in list(range(0, max(self._currentQuadNum - self.quadVisibiltyBehind, 0))) + list(range(min(self._currentQuadNum + self.quadVisibiltyAhead + 1, self._numQuads), self._numQuads)):
self.quadrants[i].offstage()
if i == 0:
self.startPlatform.offstage()
elif i == self._numQuads - 1:
self.endPlatform.offstage()
def convertQuadNumToY(self, quadNum):
return quadNum * self.quadLengthUnits
def convertYToQuadNum(self, y):
return int(y / self.quadLengthUnits)
def convertCenterYToQuadNum(self, y):
return self.convertYToQuadNum(y + self._halfQuadLengthUnits)
class CogdoFlyingLevelFactory:
def __init__(self, parent, quadLengthUnits, quadVisibilityAhead, quadVisibiltyBehind, rng = None):
self.parent = parent
self.quadLengthUnits = quadLengthUnits
self.quadVisibiltyAhead = quadVisibilityAhead
self.quadVisibiltyBehind = quadVisibiltyBehind
self._rng = rng or RandomNumGen(1)
self._level = None
return
def loadAndBuildLevel(self, safezoneId):
levelNode = NodePath('level')
frameModel = CogdoUtil.loadFlyingModel('level')
startPlatformModel = CogdoUtil.loadFlyingModel('levelStart')
endPlatformModel = CogdoUtil.loadFlyingModel('levelEnd')
for fan in frameModel.findAllMatches('**/*wallFan'):
fan.flattenStrong()
frameModel.find('**/fogOpaque').setBin('background', 1)
frameModel.find('**/ceiling').setBin('background', 2)
frameModel.find('**/fogTranslucent_bm').setBin('fixed', 1)
frameModel.find('**/wallR').setBin('opaque', 2)
frameModel.find('**/wallL').setBin('opaque', 2)
frameModel.find('**/fogTranslucent_top').setBin('fixed', 2)
frameModel.getChildren().reparentTo(levelNode)
levelNode.hide()
self._level = CogdoFlyingLevel(self.parent, levelNode, startPlatformModel, endPlatformModel, self.quadLengthUnits, self.quadVisibiltyAhead, self.quadVisibiltyBehind)
if Globals.Dev.WantTempLevel:
quads = Globals.Dev.DevQuadsOrder
else:
levelInfo = Globals.Level.DifficultyOrder[safezoneId]
quads = []
for difficulty in levelInfo:
quadList = Globals.Level.QuadsByDifficulty[difficulty]
quads.append(quadList[self._rng.randint(0, len(quadList) - 1)])
for i in quads:
filePath = CogdoUtil.getModelPath('quadrant%i' % i, 'flying')
quadModel = loader.loadModel(filePath)
for np in quadModel.findAllMatches('**/*lightCone*'):
CogdoUtil.initializeLightCone(np, 'fixed', 3)
self._level.appendQuadrant(quadModel)
self._level.ready()
def createLevel(self, safezoneId = 2000):
if self._level is None:
self.loadAndBuildLevel(safezoneId)
return self._level
def createLevelFog(self):
if self._level is None:
self.loadAndBuildLevel()
return CogdoFlyingLevelFog(self._level)
|
the-stack_0_12199 | # MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Unit tests for the Python scanner. These tests validate proper working of the
Python scanner by confirming that the results of the scan match expectations.
The absolute path tests have strongly-defined behavior in that there is no real
ambiguity about what they should result in. For example, if you import package x,
you expect to get x/__init__.py as a dependency.
The relative path tests that reach into ancestor directories do have some
ambiguity in whether to depend upon __init__.py in those referenced ancestor
directories. Python only allows these kinds of relative imports if the file is
part of a package, in which case those ancestor directories' __init__.py files
have already been imported.
"""
import SCons.compat
import collections
import os
import unittest
import TestCmd
import SCons.Node.FS
import SCons.Scanner.Python
test = TestCmd.TestCmd(workdir='')
test.dir_fixture('python_scanner')
if os.path.normcase('foo') == os.path.normcase('FOO'):
my_normpath = os.path.normcase
else:
my_normpath = os.path.normpath
def deps_match(self, deps, headers):
global my_normpath
scanned = list(map(my_normpath, list(map(str, deps))))
expect = list(map(my_normpath, headers))
self.assertTrue(scanned == expect,
"expect %s != scanned %s" % (expect, scanned))
# Copied from LaTeXTests.py.
class DummyEnvironment(collections.UserDict):
def __init__(self, **kwargs):
super().__init__()
self.data.update(kwargs)
self.fs = SCons.Node.FS.FS(test.workpath(''))
self['ENV'] = {}
def Dictionary(self, *args):
return self.data
def subst(self, strSubst, target=None, source=None, conv=None):
if strSubst[0] == '$':
return self.data[strSubst[1:]]
return strSubst
def subst_list(self, strSubst, target=None, source=None, conv=None):
if strSubst[0] == '$':
return [self.data[strSubst[1:]]]
return [[strSubst]]
def subst_path(self, path, target=None, source=None, conv=None):
if not isinstance(path, list):
path = [path]
return list(map(self.subst, path))
def get_calculator(self):
return None
def get_factory(self, factory):
return factory or self.fs.File
def Dir(self, filename):
return self.fs.Dir(filename)
def File(self, filename):
return self.fs.File(filename)
class PythonScannerTestPythonPath(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
env['ENV']['PYTHONPATH'] = test.workpath('')
path = s.path(env)
deps = s(env.File('imports_simple_package.py'), env, path)
files = ['simple_package/__init__.py']
deps_match(self, deps, files)
class PythonScannerTestPythonCallablePath(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
env['ENV']['PYTHONPATH'] = test.workpath('')
deps = s(env.File('imports_simple_package.py'), env,
lambda : s.path(env))
files = ['simple_package/__init__.py']
deps_match(self, deps, files)
class PythonScannerTestImportSimplePackage(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('imports_simple_package.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py']
deps_match(self, deps, files)
# Repeat the test in case there are any issues caching includes.
deps = s(node, env, path)
deps_match(self, deps, files)
class PythonScannerTestImportSimplePackageModule1As(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('import_simple_package_module1_as.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py', 'simple_package/module1.py']
deps_match(self, deps, files)
class PythonScannerTestImportSimplePackageModuleAs(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('import_simple_package_module1.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py', 'simple_package/module1.py']
deps_match(self, deps, files)
class PythonScannerTestFromImportSimplePackageModule1(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('from_import_simple_package_module1.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py', 'simple_package/module1.py']
deps_match(self, deps, files)
class PythonScannerTestFromImportSimplePackageModule1As(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('from_import_simple_package_module1_as.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py', 'simple_package/module1.py']
deps_match(self, deps, files)
class PythonScannerTestFromImportSimplePackageModulesNoSpace(
unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('from_import_simple_package_modules_no_space.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py', 'simple_package/module1.py',
'simple_package/module2.py']
deps_match(self, deps, files)
class PythonScannerTestFromImportSimplePackageModulesWithSpace(
unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('from_import_simple_package_modules_with_space.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['simple_package/__init__.py', 'simple_package/module1.py',
'simple_package/module2.py']
deps_match(self, deps, files)
class PythonScannerTestCurdirReferenceScript(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.Dir('curdir_reference').File('script.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['curdir_reference/helper.py']
deps_match(self, deps, files)
class PythonScannerTestImportsNested3(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File('imports_nested3.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['nested1/__init__.py', 'nested1/nested2/__init__.py',
'nested1/nested2/nested3/__init__.py']
deps_match(self, deps, files)
class PythonScannerTestImportsGrandparentModule(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File(
'nested1/nested2/nested3/imports_grandparent_module.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['nested1/module.py']
deps_match(self, deps, files)
class PythonScannerTestImportsParentModule(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File(
'nested1/nested2/nested3/imports_parent_module.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['nested1/nested2/module.py']
deps_match(self, deps, files)
class PythonScannerTestImportsParentThenSubmodule(unittest.TestCase):
def runTest(self):
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
node = env.File(
'nested1/nested2/nested3/imports_parent_then_submodule.py')
path = s.path(env, source=[node])
deps = s(node, env, path)
files = ['nested1/nested2a/__init__.py', 'nested1/nested2a/module.py']
deps_match(self, deps, files)
class PythonScannerTestImportsModuleWithFunc(unittest.TestCase):
def runTest(self):
"""
This test case tests the following import statement:
`from simple_package.module1 import somefunc` with somefunc.py existing
in the same folder as module1.py. It validates that the scanner doesn't
accidentally take a dependency somefunc.py.
"""
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
env['ENV']['PYTHONPATH'] = test.workpath('')
deps = s(env.File('from_import_simple_package_module1_func.py'), env,
lambda : s.path(env))
files = ['simple_package/__init__.py', 'simple_package/module1.py']
deps_match(self, deps, files)
class PythonScannerTestFromNested1ImportNested2(unittest.TestCase):
def runTest(self):
"""
This test case tests the following import statement:
`from nested1 import module, nested2`. In this test, module is a Python
module and nested2 is a package. Validates that the scanner can handle
such mixed imports.
"""
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
env['ENV']['PYTHONPATH'] = test.workpath('')
deps = s(env.File('from_nested1_import_multiple.py'), env,
lambda : s.path(env))
files = ['nested1/__init__.py', 'nested1/module.py',
'nested1/nested2/__init__.py']
deps_match(self, deps, files)
class PythonScannerTestImportUnknownFiles(unittest.TestCase):
def runTest(self):
"""
This test case tests importing files that are not found. If Python
really can't find those files, it will fail. But this is intended to
test the various failure paths in the scanner to make sure that they
don't raise exceptions.
"""
env = DummyEnvironment()
s = SCons.Scanner.Python.PythonScanner
env['ENV']['PYTHONPATH'] = test.workpath('')
deps = s(env.File('imports_unknown_files.py'), env,
lambda : s.path(env))
files = []
deps_match(self, deps, files)
if __name__ == "__main__":
unittest.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_12200 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import hmac
import os
import json
import ecdsa
import pyaes
from .util import bfh, bh2u, to_string
from . import version
from .util import print_error, InvalidPassword, assert_bytes, to_bytes, inv_dict
from . import segwit_addr
def read_json_dict(filename):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = {}
return r
# Version numbers for BIP32 extended keys
# standard: xprv, xpub
# segwit in p2sh: yprv, ypub
# native segwit: zprv, zpub
XPRV_HEADERS = {
'standard': 0x0488ade4,
'p2wpkh-p2sh': 0x049d7878,
'p2wsh-p2sh': 0x295b005,
'p2wpkh': 0x4b2430c,
'p2wsh': 0x2aa7a99
}
XPUB_HEADERS = {
'standard': 0x0488b21e,
'p2wpkh-p2sh': 0x049d7cb2,
'p2wsh-p2sh': 0x295b43f,
'p2wpkh': 0x4b24746,
'p2wsh': 0x2aa7ed3
}
class NetworkConstants:
@classmethod
def set_mainnet(cls):
cls.TESTNET = False
cls.WIF_PREFIX = 0x80
cls.ADDRTYPE_P2PKH = 0
cls.ADDRTYPE_P2SH = 5
cls.SEGWIT_HRP = "bc"
cls.HEADERS_URL = "https://headers.electrum.org/blockchain_headers"
cls.GENESIS = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
cls.DEFAULT_PORTS = {'t': '50001', 's': '50002'}
cls.DEFAULT_SERVERS = read_json_dict('servers.json')
@classmethod
def set_testnet(cls):
cls.TESTNET = True
cls.WIF_PREFIX = 0xef
cls.ADDRTYPE_P2PKH = 111
cls.ADDRTYPE_P2SH = 196
cls.SEGWIT_HRP = "tb"
cls.HEADERS_URL = "https://headers.electrum.org/testnet_headers"
cls.GENESIS = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"
cls.DEFAULT_PORTS = {'t':'51001', 's':'51002'}
cls.DEFAULT_SERVERS = read_json_dict('servers_testnet.json')
NetworkConstants.set_mainnet()
################################## transactions
FEE_STEP = 10000
MAX_FEE_RATE = 300000
FEE_TARGETS = [25, 10, 5, 2]
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transaction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
try:
from Cryptodome.Cipher import AES
except:
AES = None
class InvalidPadding(Exception):
pass
def append_PKCS7_padding(data):
assert_bytes(data)
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data):
assert_bytes(data)
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
data = append_PKCS7_padding(data)
if AES:
e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
if AES:
cipher = AES.new(key, AES.MODE_CBC, iv)
data = cipher.decrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
def EncodeAES(secret, s):
assert_bytes(s)
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = to_string(DecodeAES(secret, s), "utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
def rev_hex(s):
return bh2u(bfh(s)[::-1])
def int_to_hex(i, length=1):
assert isinstance(i, int)
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
# https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
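    # Worked examples (editor's note; these follow directly from the branches below):
    #   var_int(0xfc)    -> 'fc'            (single byte)
    #   var_int(0xfd)    -> 'fdfd00'        ('fd' marker + 2-byte little-endian value)
    #   var_int(0x10000) -> 'fe00000100'    ('fe' marker + 4-byte little-endian value)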
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
def push_script(x):
return op_push(len(x)//2) + x
def sha256(x):
x = to_bytes(x, 'utf8')
return bytes(hashlib.sha256(x).digest())
def Hash(x):
x = to_bytes(x, 'utf8')
out = bytes(sha256(sha256(x)))
return out
hash_encode = lambda x: bh2u(x[::-1])
hash_decode = lambda x: bfh(x)[::-1]
hmac_sha_512 = lambda x, y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
from . import mnemonic
x = mnemonic.normalize_text(x)
s = bh2u(hmac_sha_512(b"Seed version", x.encode('utf8')))
return s.startswith(prefix)
def is_old_seed(seed):
from . import old_mnemonic, mnemonic
seed = mnemonic.normalize_text(seed)
words = seed.split()
try:
# checks here are deliberately left weak for legacy reasons, see #3149
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed = bfh(seed)
is_hex = (len(seed) == 16 or len(seed) == 32)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
if is_old_seed(x):
return 'old'
elif is_new_seed(x):
return 'standard'
elif is_new_seed(x, version.SEED_PREFIX_SW):
return 'segwit'
elif is_new_seed(x, version.SEED_PREFIX_2FA):
return '2fa'
return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
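    # Editor's illustration: the secp256k1 generator point G serializes (compressed) to
    #   '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
    # since G's y-coordinate is even, giving the 0x02 prefix followed by the 32-byte X.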
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return bfh(key)
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(sha256(public_key))
return md.digest()
except BaseException:
from . import ripemd
md = ripemd.new(sha256(public_key))
return md.digest()
def hash160_to_b58_address(h160, addrtype, witness_program_version=1):
s = bytes([addrtype])
s += h160
return base_encode(s+Hash(s)[0:4], base=58)
def b58_address_to_hash160(addr):
addr = to_bytes(addr, 'ascii')
_bytes = base_decode(addr, 25, base=58)
return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key):
return hash160_to_p2pkh(hash_160(public_key))
def hash_to_segwit_addr(h):
return segwit_addr.encode(NetworkConstants.SEGWIT_HRP, 0, h)
def public_key_to_p2wpkh(public_key):
return hash_to_segwit_addr(hash_160(public_key))
def script_to_p2wsh(script):
return hash_to_segwit_addr(sha256(bfh(script)))
def p2wpkh_nested_script(pubkey):
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script):
wsh = bh2u(sha256(bfh(witness_script)))
return '00' + push_script(wsh)
def pubkey_to_address(txin_type, pubkey):
if txin_type == 'p2pkh':
return public_key_to_p2pkh(bfh(pubkey))
elif txin_type == 'p2wpkh':
return hash_to_segwit_addr(hash_160(bfh(pubkey)))
elif txin_type == 'p2wpkh-p2sh':
scriptSig = p2wpkh_nested_script(pubkey)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type, redeem_script):
if txin_type == 'p2sh':
return hash160_to_p2sh(hash_160(bfh(redeem_script)))
elif txin_type == 'p2wsh':
return script_to_p2wsh(redeem_script)
elif txin_type == 'p2wsh-p2sh':
scriptSig = p2wsh_nested_script(redeem_script)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def script_to_address(script):
from .transaction import get_address_from_output_script
t, addr = get_address_from_output_script(bfh(script))
assert t == TYPE_ADDRESS
return addr
def address_to_script(addr):
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
if witprog is not None:
assert (0 <= witver <= 16)
OP_n = witver + 0x50 if witver > 0 else 0
script = bh2u(bytes([OP_n]))
script += push_script(bh2u(bytes(witprog)))
return script
addrtype, hash_160 = b58_address_to_hash160(addr)
if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
script = '76a9' # op_dup, op_hash_160
script += push_script(bh2u(hash_160))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
script = 'a9' # op_hash_160
script += push_script(bh2u(hash_160))
script += '87' # op_equal
else:
raise BaseException('unknown address type')
return script
def address_to_scripthash(addr):
script = address_to_script(addr)
return script_to_scripthash(script)
def script_to_scripthash(script):
h = sha256(bytes.fromhex(script))[0:32]
return bh2u(bytes(reversed(h)))
def public_key_to_p2pk_script(pubkey):
script = push_script(pubkey)
script += 'ac' # op_checksig
return script
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
""" encode v, which is a string of bytes, to base58."""
assert_bytes(v)
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * c
result = bytearray()
while long_value >= base:
div, mod = divmod(long_value, base)
result.append(chars[mod])
long_value = div
result.append(chars[long_value])
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
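    # Editor's example: base_encode(b'\x00\x01', base=58) == '12'
    # (the value 0x01 encodes to '2', and the single leading zero byte prepends one '1')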
nPad = 0
for c in v:
if c == 0x00:
nPad += 1
else:
break
result.extend([chars[0]] * nPad)
result.reverse()
return result.decode('ascii')
def base_decode(v, length, base):
""" decode v into a string of len bytes."""
# assert_bytes(v)
v = to_bytes(v, 'ascii')
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(bytes([c])) * (base**i)
result = bytearray()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result.append(mod)
long_value = div
result.append(long_value)
nPad = 0
for c in v:
if c == chars[0]:
nPad += 1
else:
break
result.extend(b'\x00' * nPad)
if length is not None and len(result) != length:
return None
result.reverse()
return bytes(result)
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
# extended key export format for segwit
SCRIPT_TYPES = {
'p2pkh':0,
'p2wpkh':1,
'p2wpkh-p2sh':2,
'p2sh':5,
'p2wsh':6,
'p2wsh-p2sh':7
}
def serialize_privkey(secret, compressed, txin_type):
prefix = bytes([(SCRIPT_TYPES[txin_type]+NetworkConstants.WIF_PREFIX)&255])
suffix = b'\01' if compressed else b''
vchIn = prefix + secret + suffix
return EncodeBase58Check(vchIn)
def deserialize_privkey(key):
# whether the pubkey is compressed should be visible from the keystore
vch = DecodeBase58Check(key)
if is_minikey(key):
return 'p2pkh', minikey_to_private_key(key), True
elif vch:
txin_type = inv_dict(SCRIPT_TYPES)[vch[0] - NetworkConstants.WIF_PREFIX]
assert len(vch) in [33, 34]
compressed = len(vch) == 34
return txin_type, vch[1:33], compressed
else:
raise BaseException("cannot deserialize", key)
def regenerate_key(pk):
assert len(pk) == 32
return EC_KEY(pk)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return bfh('%064x' % pkey.secret)
def is_compressed(sec):
return deserialize_privkey(sec)[2]
def public_key_from_private_key(pk, compressed):
pkey = regenerate_key(pk)
public_key = GetPubKey(pkey.pubkey, compressed)
return bh2u(public_key)
def address_from_private_key(sec):
txin_type, privkey, compressed = deserialize_privkey(sec)
public_key = public_key_from_private_key(privkey, compressed)
return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr):
try:
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
except Exception as e:
return False
return witprog is not None
def is_b58_address(addr):
try:
addrtype, h = b58_address_to_hash160(addr)
except Exception as e:
return False
if addrtype not in [NetworkConstants.ADDRTYPE_P2PKH, NetworkConstants.ADDRTYPE_P2SH]:
return False
return addr == hash160_to_b58_address(h, addrtype)
def is_address(addr):
return is_segwit_address(addr) or is_b58_address(addr)
def is_private_key(key):
try:
k = deserialize_privkey(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(ord(c) in __b58chars for c in text)
and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
length = bfh(var_int(len(message)))
return b"\x18Bitcoin Signed Message:\n" + length + message
def verify_message(address, sig, message):
assert_bytes(sig, message)
try:
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key using the address
pubkey = point_to_ser(public_key.pubkey.point, compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, bh2u(pubkey))
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def encrypt_message(message, pubkey):
return EC_KEY.encrypt_message(message, bfh(pubkey))
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
        My2 = (pow(Mx, 3, _p) + _a * Mx + _b) % _p  # y^2 = x^3 + a*x + b (mod p)
My = pow(My2, (_p+1)//4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return bfh( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) )
return bfh( '04'+('%064x'%P.x())+('%064x'%P.y()) )
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0] == 0x03)[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
from . import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1), compressed
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return bh2u(point_to_ser(self.pubkey.point, compressed))
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, is_compressed):
message = to_bytes(message, 'utf8')
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message):
assert_bytes(message)
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
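    # A hedged round-trip sketch (editor's note; key material and message are illustrative only):
    #   priv = EC_KEY(sha256(b'illustrative 32-byte secret'))
    #   blob = encrypt_message(b'hello', priv.get_public_key())   # module-level helper, takes a hex pubkey
    #   assert priv.decrypt_message(blob) == b'hello'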
@classmethod
def encrypt_message(self, message, pubkey):
assert_bytes(message)
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = bfh(ephemeral.get_public_key(compressed=True))
encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != b'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
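# Editor's sketch (the names master_k/master_c are illustrative, e.g. obtained from bip32_root below):
#   k0,  c0  = CKD_priv(master_k, master_c, 0)                 # normal child m/0
#   k0h, c0h = CKD_priv(master_k, master_c, 0 | BIP32_PRIME)   # hardened child m/0'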
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, bfh(rev_hex(int_to_hex(n,4))), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = bytes([0]) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
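# Editor's sketch: given a parent xpub's (cK, c), the first non-hardened child's
# compressed pubkey and chain code are obtained as:
#   cK0, c0 = CKD_pub(cK, c, 0)
# (a hardened index, i.e. one with n & BIP32_PRIME set, raises; see the check below)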
def CKD_pub(cK, c, n):
if n & BIP32_PRIME: raise
return _CKD_pub(cK, c, bfh(rev_hex(int_to_hex(n,4))))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
def xprv_header(xtype):
return bfh("%08x" % XPRV_HEADERS[xtype])
def xpub_header(xtype):
return bfh("%08x" % XPUB_HEADERS[xtype])
def serialize_xprv(xtype, c, k, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xprv = xprv_header(xtype) + bytes([depth]) + fingerprint + child_number + c + bytes([0]) + k
return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xpub = xpub_header(xtype) + bytes([depth]) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def deserialize_xkey(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = xkey[4]
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = int('0x' + bh2u(xkey[0:4]), 16)
headers = XPRV_HEADERS if prv else XPUB_HEADERS
if header not in headers.values():
raise BaseException('Invalid xpub format', hex(header))
xtype = list(headers.keys())[list(headers.values()).index(header)]
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
return deserialize_xkey(xkey, True)
def xpub_type(x):
return deserialize_xpub(x)[0]
def is_xpub(text):
try:
deserialize_xpub(text)
return True
except:
return False
def is_xprv(text):
try:
deserialize_xprv(text)
return True
except:
return False
def xpub_from_xprv(xprv):
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
K, cK = get_pubkeys_from_secret(k)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = serialize_xprv(xtype, master_c, master_k)
xpub = serialize_xpub(xtype, master_c, cK)
return xprv, xpub
def xpub_from_pubkey(xtype, cK):
assert cK[0] in [0x02, 0x03]
return serialize_xpub(xtype, b'\x00'*32, cK)
def bip32_derivation(s):
assert s.startswith('m/')
s = s[2:]
for n in s.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
yield i
def is_bip32_derivation(x):
try:
[ i for i in bip32_derivation(x)]
return True
except :
return False
def bip32_private_derivation(xprv, branch, sequence):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv)
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
K, cK = get_pubkeys_from_secret(k)
xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return k
|
the-stack_0_12201 | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""This module extends the TensorFlowOnSpark API to support Spark ML Pipelines.
It provides a TFEstimator class to fit a TFModel using TensorFlow. The TFEstimator will actually spawn a TensorFlowOnSpark cluster
to conduct distributed training, but due to architectural limitations, the TFModel will only run single-node TensorFlow instances
when inferencing on the executors. The executors will run in parallel, but the TensorFlow model must fit in the memory
of each executor.
There is also an option to provide a separate "export" function, which allows users to export a different graph for inferencing vs. training.
This is useful when the training graph uses InputMode.TENSORFLOW with queue_runners, but the inferencing graph needs placeholders.
And this is especially useful for exporting saved_models for TensorFlow Serving.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pyspark.context import SparkContext
from pyspark.ml.param.shared import Param, Params, TypeConverters
from pyspark.ml.pipeline import Estimator, Model
from pyspark.sql import Row, SparkSession
import tensorflow as tf
from tensorflow.contrib.saved_model.python.saved_model import reader, signature_def_utils
from tensorflow.python.saved_model import loader
from . import TFCluster, gpu_info, dfutil
import argparse
import copy
import logging
import os
import subprocess
import sys
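# A hedged usage sketch for the module docstring above (editor's note: `main_fun`,
# `tf_args`, `export_fn` and the column/tensor names are illustrative, and the
# TFEstimator/TFModel call pattern is assumed from the TensorFlowOnSpark examples):
#
#   estimator = TFEstimator(main_fun, tf_args, export_fn=export_fn) \
#       .setInputMapping({'image': 'x', 'label': 'y_'}) \
#       .setClusterSize(4) \
#       .setModelDir('mnist_model') \
#       .setExportDir('mnist_export')
#   model = estimator.fit(train_df)
#   preds = model.setInputMapping({'image': 'x'}) \
#       .setOutputMapping({'prediction': 'col_out'}) \
#       .transform(test_df)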
##### TensorFlowOnSpark Params
class TFTypeConverters(object):
"""Custom DataFrame TypeConverter for dictionary types (since this is not provided by Spark core)."""
@staticmethod
def toDict(value):
if type(value) == dict:
return value
else:
raise TypeError("Could not convert %s to OrderedDict" % value)
class HasBatchSize(Params):
batch_size = Param(Params._dummy(), "batch_size", "Number of records per batch", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasBatchSize, self).__init__()
def setBatchSize(self, value):
return self._set(batch_size=value)
def getBatchSize(self):
return self.getOrDefault(self.batch_size)
class HasClusterSize(Params):
cluster_size = Param(Params._dummy(), "cluster_size", "Number of nodes in the cluster", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasClusterSize, self).__init__()
def setClusterSize(self, value):
return self._set(cluster_size=value)
def getClusterSize(self):
return self.getOrDefault(self.cluster_size)
class HasEpochs(Params):
epochs = Param(Params._dummy(), "epochs", "Number of epochs to train", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasEpochs, self).__init__()
def setEpochs(self, value):
return self._set(epochs=value)
def getEpochs(self):
return self.getOrDefault(self.epochs)
class HasInputMapping(Params):
input_mapping = Param(Params._dummy(), "input_mapping", "Mapping of input DataFrame column to input tensor", typeConverter=TFTypeConverters.toDict)
def __init__(self):
super(HasInputMapping, self).__init__()
def setInputMapping(self, value):
return self._set(input_mapping=value)
def getInputMapping(self):
return self.getOrDefault(self.input_mapping)
class HasInputMode(Params):
input_mode = Param(Params._dummy(), "input_mode", "Input data feeding mode (0=TENSORFLOW, 1=SPARK)", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasInputMode, self).__init__()
def setInputMode(self, value):
return self._set(input_mode=value)
def getInputMode(self):
return self.getOrDefault(self.input_mode)
class HasModelDir(Params):
model_dir = Param(Params._dummy(), "model_dir", "Path to save/load model checkpoints", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasModelDir, self).__init__()
def setModelDir(self, value):
return self._set(model_dir=value)
def getModelDir(self):
return self.getOrDefault(self.model_dir)
class HasNumPS(Params):
num_ps = Param(Params._dummy(), "num_ps", "Number of PS nodes in cluster", typeConverter=TypeConverters.toInt)
driver_ps_nodes = Param(Params._dummy(), "driver_ps_nodes", "Run PS nodes on driver locally", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasNumPS, self).__init__()
def setNumPS(self, value):
return self._set(num_ps=value)
def getNumPS(self):
return self.getOrDefault(self.num_ps)
def setDriverPSNodes(self, value):
return self._set(driver_ps_nodes=value)
def getDriverPSNodes(self):
return self.getOrDefault(self.driver_ps_nodes)
class HasOutputMapping(Params):
output_mapping = Param(Params._dummy(), "output_mapping", "Mapping of output tensor to output DataFrame column", typeConverter=TFTypeConverters.toDict)
def __init__(self):
super(HasOutputMapping, self).__init__()
def setOutputMapping(self, value):
return self._set(output_mapping=value)
def getOutputMapping(self):
return self.getOrDefault(self.output_mapping)
class HasProtocol(Params):
protocol = Param(Params._dummy(), "protocol", "Network protocol for Tensorflow (grpc|rdma)", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasProtocol, self).__init__()
def setProtocol(self, value):
return self._set(protocol=value)
def getProtocol(self):
return self.getOrDefault(self.protocol)
class HasReaders(Params):
readers = Param(Params._dummy(), "readers", "number of reader/enqueue threads", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasReaders, self).__init__()
def setReaders(self, value):
return self._set(readers=value)
def getReaders(self):
return self.getOrDefault(self.readers)
class HasSteps(Params):
steps = Param(Params._dummy(), "steps", "Maximum number of steps to train", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasSteps, self).__init__()
def setSteps(self, value):
return self._set(steps=value)
def getSteps(self):
return self.getOrDefault(self.steps)
class HasTensorboard(Params):
tensorboard = Param(Params._dummy(), "tensorboard", "Launch tensorboard process", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasTensorboard, self).__init__()
def setTensorboard(self, value):
return self._set(tensorboard=value)
def getTensorboard(self):
return self.getOrDefault(self.tensorboard)
class HasTFRecordDir(Params):
tfrecord_dir = Param(Params._dummy(), "tfrecord_dir", "Path to temporarily export a DataFrame as TFRecords (for InputMode.TENSORFLOW apps)", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasTFRecordDir, self).__init__()
def setTFRecordDir(self, value):
return self._set(tfrecord_dir=value)
def getTFRecordDir(self):
return self.getOrDefault(self.tfrecord_dir)
##### SavedModelBuilder Params
class HasExportDir(Params):
export_dir = Param(Params._dummy(), "export_dir", "Directory to export saved_model", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasExportDir, self).__init__()
def setExportDir(self, value):
return self._set(export_dir=value)
def getExportDir(self):
return self.getOrDefault(self.export_dir)
class HasSignatureDefKey(Params):
signature_def_key = Param(Params._dummy(), "signature_def_key", "Identifier for a specific saved_model signature", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasSignatureDefKey, self).__init__()
self._setDefault(signature_def_key=None)
def setSignatureDefKey(self, value):
return self._set(signature_def_key=value)
def getSignatureDefKey(self):
return self.getOrDefault(self.signature_def_key)
class HasTagSet(Params):
tag_set = Param(Params._dummy(), "tag_set", "Comma-delimited list of tags identifying a saved_model metagraph", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasTagSet, self).__init__()
def setTagSet(self, value):
return self._set(tag_set=value)
def getTagSet(self):
return self.getOrDefault(self.tag_set)
class Namespace(object):
"""
Utility class to convert dictionaries to Namespace-like objects.
Based on https://docs.python.org/dev/library/types.html#types.SimpleNamespace
"""
argv = None
def __init__(self, d):
if isinstance(d, list):
self.argv = d
elif isinstance(d, dict):
self.__dict__.update(d)
elif isinstance(d, argparse.Namespace):
self.__dict__.update(vars(d))
elif isinstance(d, Namespace):
self.__dict__.update(d.__dict__)
else:
raise Exception("Unsupported Namespace args: {}".format(d))
def __iter__(self):
if self.argv:
for item in self.argv:
yield item
else:
for key in self.__dict__.keys():
yield key
def __repr__(self):
if self.argv:
return "{}".format(self.argv)
else:
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__(self, other):
if self.argv:
return self.argv == other
else:
return self.__dict__ == other.__dict__
class TFParams(Params):
"""Mix-in class to store namespace-style args and merge w/ SparkML-style params."""
args = None
def merge_args_params(self):
local_args = copy.copy(self.args) # make a local copy of args
args_dict = vars(local_args) # get dictionary view
for p in self.params:
args_dict[p.name] = self.getOrDefault(p.name) # update with params
return local_args
class TFEstimator(Estimator, TFParams, HasInputMapping,
HasClusterSize, HasNumPS, HasInputMode, HasProtocol, HasTensorboard, HasModelDir, HasExportDir, HasTFRecordDir,
HasBatchSize, HasEpochs, HasReaders, HasSteps):
"""Spark ML Estimator which launches a TensorFlowOnSpark cluster for distributed training.
The columns of the DataFrame passed to the ``fit()`` method will be mapped to TensorFlow tensors according to the ``setInputMapping()`` method.
If an ``export_fn`` was provided to the constructor, it will be run on a single executor immediately after the distributed training has completed.
This allows users to export a TensorFlow saved_model with a different execution graph for inferencing, e.g. replacing an input graph of
TFReaders and QueueRunners with Placeholders.
For InputMode.TENSORFLOW, the input DataFrame will be exported as TFRecords to a temporary location specified by the ``tfrecord_dir``.
The TensorFlow application will then be expected to read directly from this location during training. However, if the input DataFrame was
produced by the ``dfutil.loadTFRecords()`` method, i.e. originated from TFRecords on disk, then the `tfrecord_dir` will be set to the
  original source location of the TFRecords and the additional export step will be skipped.
Args:
:train_fn: TensorFlow "main" function for training.
:tf_args: Arguments specific to the TensorFlow "main" function.
:export_fn: TensorFlow function for exporting a saved_model.
"""
train_fn = None
export_fn = None
def __init__(self, train_fn, tf_args, export_fn=None):
super(TFEstimator, self).__init__()
self.train_fn = train_fn
self.export_fn = export_fn
self.args = Namespace(tf_args)
self._setDefault(input_mapping={},
cluster_size=1,
num_ps=0,
driver_ps_nodes=False,
input_mode=TFCluster.InputMode.SPARK,
protocol='grpc',
tensorboard=False,
model_dir=None,
export_dir=None,
tfrecord_dir=None,
batch_size=100,
epochs=1,
readers=1,
steps=1000)
def _fit(self, dataset):
"""Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.
Args:
:dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.
Returns:
A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
"""
sc = SparkContext.getOrCreate()
logging.info("===== 1. train args: {0}".format(self.args))
logging.info("===== 2. train params: {0}".format(self._paramMap))
local_args = self.merge_args_params()
logging.info("===== 3. train args + params: {0}".format(local_args))
if local_args.input_mode == TFCluster.InputMode.TENSORFLOW:
if dfutil.isLoadedDF(dataset):
# if just a DataFrame loaded from tfrecords, just point to original source path
logging.info("Loaded DataFrame of TFRecord.")
local_args.tfrecord_dir = dfutil.loadedDF[dataset]
else:
# otherwise, save as tfrecords and point to save path
assert local_args.tfrecord_dir, "Please specify --tfrecord_dir to export DataFrame to TFRecord."
if self.getInputMapping():
# if input mapping provided, filter only required columns before exporting
          dataset = dataset.select(list(self.getInputMapping().keys()))
logging.info("Exporting DataFrame {} as TFRecord to: {}".format(dataset.dtypes, local_args.tfrecord_dir))
dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir)
logging.info("Done saving")
tf_args = self.args.argv if self.args.argv else local_args
cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps,
local_args.tensorboard, local_args.input_mode, driver_ps_nodes=local_args.driver_ps_nodes)
if local_args.input_mode == TFCluster.InputMode.SPARK:
# feed data, using a deterministic order for input columns (lexicographic by key)
input_cols = sorted(self.getInputMapping().keys())
cluster.train(dataset.select(input_cols).rdd, local_args.epochs)
cluster.shutdown()
# Run export function, if provided
if self.export_fn:
assert local_args.export_dir, "Export function requires --export_dir to be set"
logging.info("Exporting saved_model (via export_fn) to: {}".format(local_args.export_dir))
def _export(iterator, fn, args):
single_node_env(args)
fn(args)
      # Run on a single executor
sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args))
return self._copyValues(TFModel(self.args))
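# Illustrative usage sketch for TFEstimator, not part of this module. The training
# function `map_fun`, the DataFrame `train_df` and the column/tensor names below are
# hypothetical placeholders; only setters defined by the mix-ins above are used.
#
#   estimator = TFEstimator(map_fun, sys.argv) \
#       .setInputMapping({'image': 'x', 'label': 'y_'}) \
#       .setNumPS(1) \
#       .setModelDir('my_model_dir') \
#       .setSteps(1000)
#   model = estimator.fit(train_df)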
class TFModel(Model, TFParams,
HasInputMapping, HasOutputMapping,
HasBatchSize,
HasModelDir, HasExportDir, HasSignatureDefKey, HasTagSet):
"""Spark ML Model backed by a TensorFlow model checkpoint/saved_model on disk.
During ``transform()``, each executor will run an independent, single-node instance of TensorFlow in parallel, so the model must fit in memory.
The model/session will be loaded/initialized just once for each Spark Python worker, and the session will be cached for
subsequent tasks/partitions to avoid re-loading the model for each partition.
Args:
:tf_args: Dictionary of arguments specific to TensorFlow "main" function.
"""
def __init__(self, tf_args):
super(TFModel, self).__init__()
self.args = Namespace(tf_args)
self._setDefault(input_mapping={},
output_mapping={},
batch_size=100,
model_dir=None,
export_dir=None,
signature_def_key=None,
tag_set=None)
def _transform(self, dataset):
"""Transforms the input DataFrame by applying the _run_model() mapPartitions function.
Args:
:dataset: A Spark DataFrame for TensorFlow inferencing.
"""
spark = SparkSession.builder.getOrCreate()
# set a deterministic order for input/output columns (lexicographic by key)
input_cols = [ col for col, tensor in sorted(self.getInputMapping().items()) ] # input col => input tensor
output_cols = [ col for tensor, col in sorted(self.getOutputMapping().items()) ] # output tensor => output col
# run single-node inferencing on each executor
logging.info("input_cols: {}".format(input_cols))
logging.info("output_cols: {}".format(output_cols))
# merge args + params
logging.info("===== 1. inference args: {0}".format(self.args))
logging.info("===== 2. inference params: {0}".format(self._paramMap))
local_args = self.merge_args_params()
logging.info("===== 3. inference args + params: {0}".format(local_args))
tf_args = self.args.argv if self.args.argv else local_args
rdd_out = dataset.select(input_cols).rdd.mapPartitions(lambda it: _run_model(it, local_args, tf_args))
# convert to a DataFrame-friendly format
rows_out = rdd_out.map(lambda x: Row(*x))
return spark.createDataFrame(rows_out, output_cols)
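# Illustrative inferencing sketch for TFModel, not part of this module. The tensor
# names, tag set, export directory and `images_df` DataFrame are hypothetical
# placeholders for a saved_model produced elsewhere.
#
#   model = TFModel(sys.argv) \
#       .setInputMapping({'image': 'x'}) \
#       .setOutputMapping({'prediction': 'pred_col'}) \
#       .setExportDir('my_export_dir') \
#       .setTagSet('serve')
#   preds_df = model.transform(images_df)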
# global to each python worker process on the executors
global_sess = None # tf.Session cache
global_args = None # args provided to the _run_model() method. Any change will invalidate the global_sess cache.
def _run_model(iterator, args, tf_args):
"""mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.
Args:
:iterator: input RDD partition iterator.
:args: arguments for TFModel, in argparse format
:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.
Returns:
An iterator of result data.
"""
single_node_env(tf_args)
logging.info("===== input_mapping: {}".format(args.input_mapping))
logging.info("===== output_mapping: {}".format(args.output_mapping))
input_tensor_names = [ tensor for col,tensor in sorted(args.input_mapping.items()) ]
output_tensor_names = [ tensor for tensor,col in sorted(args.output_mapping.items()) ]
# if using a signature_def_key, get input/output tensor info from the requested signature
if args.signature_def_key:
assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument"
logging.info("===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}".format(args.tag_set, args.export_dir))
meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)
signature = signature_def_utils.get_signature_def_by_key(meta_graph_def, args.signature_def_key)
logging.debug("signature: {}".format(signature))
inputs_tensor_info = signature.inputs
logging.debug("inputs_tensor_info: {0}".format(inputs_tensor_info))
outputs_tensor_info = signature.outputs
logging.debug("outputs_tensor_info: {0}".format(outputs_tensor_info))
result = []
global global_sess, global_args
if global_sess and global_args == args:
# if graph/session already loaded/started (and using same args), just reuse it
sess = global_sess
else:
# otherwise, create new session and load graph from disk
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
if args.export_dir:
assert args.tag_set, "Inferencing from a saved_model requires --tag_set"
# load graph from a saved_model
logging.info("===== restoring from saved_model: {}".format(args.export_dir))
loader.load(sess, args.tag_set.split(','), args.export_dir)
elif args.model_dir:
# load graph from a checkpoint
ckpt = tf.train.latest_checkpoint(args.model_dir)
assert ckpt, "Invalid model checkpoint path: {}".format(args.model_dir)
logging.info("===== restoring from checkpoint: {}".format(ckpt + ".meta"))
saver = tf.train.import_meta_graph(ckpt + ".meta", clear_devices=True)
saver.restore(sess, ckpt)
else:
raise Exception("Inferencing requires either --model_dir or --export_dir argument")
global_sess = sess
global_args = args
# get list of input/output tensors (by name)
if args.signature_def_key:
input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]
output_tensors = [outputs_tensor_info[output_tensor_names[0]].name]
else:
input_tensors = [t + ':0' for t in input_tensor_names]
output_tensors = [t + ':0' for t in output_tensor_names]
logging.info("input_tensors: {0}".format(input_tensors))
logging.info("output_tensors: {0}".format(output_tensors))
# feed data in batches and return output tensors
for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):
inputs_feed_dict = {}
for i in range(len(input_tensors)):
inputs_feed_dict[input_tensors[i]] = tensors[i]
outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)
lengths = [ len(output) for output in outputs ]
input_size = len(tensors[0])
assert all([ l == input_size for l in lengths ]), "Output array sizes {} must match input size: {}".format(lengths, input_size)
python_outputs = [ output.tolist() for output in outputs ] # convert from numpy to standard python types
result.extend(zip(*python_outputs)) # convert to an array of tuples of "output columns"
return result
def single_node_env(args):
"""Sets up environment for a single-node TF session.
Args:
:args: command line arguments as either argparse args or argv list
"""
if isinstance(args, list):
sys.argv = args
elif args.argv:
sys.argv = args.argv
# ensure expanded CLASSPATH w/o glob characters (required for Spark 2.1 + JNI)
if 'HADOOP_PREFIX' in os.environ and 'TFOS_CLASSPATH_UPDATED' not in os.environ:
classpath = os.environ['CLASSPATH']
hadoop_path = os.path.join(os.environ['HADOOP_PREFIX'], 'bin', 'hadoop')
hadoop_classpath = subprocess.check_output([hadoop_path, 'classpath', '--glob']).decode()
logging.debug("CLASSPATH: {0}".format(hadoop_classpath))
os.environ['CLASSPATH'] = classpath + os.pathsep + hadoop_classpath
os.environ['TFOS_CLASSPATH_UPDATED'] = '1'
# reserve GPU, if requested
if tf.test.is_built_with_cuda():
# GPU
num_gpus = args.num_gpus if 'num_gpus' in args else 1
gpus_to_use = gpu_info.get_gpus(num_gpus)
logging.info("Using gpu(s): {0}".format(gpus_to_use))
os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use
# Note: if there is a GPU conflict (CUDA_ERROR_INVALID_DEVICE), the entire task will fail and retry.
else:
# CPU
logging.info("Using CPU")
os.environ['CUDA_VISIBLE_DEVICES'] = ''
def get_meta_graph_def(saved_model_dir, tag_set):
"""Utility function to read a meta_graph_def from disk.
From `saved_model_cli.py <https://github.com/tensorflow/tensorflow/blob/8e0e8d41a3a8f2d4a6100c2ea1dc9d6c6c4ad382/tensorflow/python/tools/saved_model_cli.py#L186>`_
Args:
:saved_model_dir: path to saved_model.
:tag_set: list of string tags identifying the TensorFlow graph within the saved_model.
Returns:
A TensorFlow meta_graph_def, or raises an Exception otherwise.
"""
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set(tag_set.split(','))
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise RuntimeError("MetaGraphDef associated with tag-set {0} could not be found in SavedModel".format(tag_set))
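# Hedged usage note: the path and tag below are placeholders. Because the comparison is
# on the full tag set, 'serve,gpu' only matches a metagraph tagged with exactly both tags.
#   meta_graph_def = get_meta_graph_def('/path/to/saved_model', 'serve')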
def yield_batch(iterable, batch_size, num_tensors=1):
"""Generator that yields batches of a DataFrame iterator.
Args:
:iterable: Spark partition iterator.
:batch_size: number of items to retrieve per invocation.
:num_tensors: number of tensors (columns) expected in each item.
Returns:
An array of ``num_tensors`` arrays, each of length `batch_size`
"""
tensors = [ [] for i in range(num_tensors) ]
for item in iterable:
if item is None:
break
for i in range(num_tensors):
tmp = str(item[i]) if type(item[i]) is bytearray else item[i]
tensors[i].append(tmp)
if len(tensors[0]) >= batch_size:
yield tensors
tensors = [ [] for i in range(num_tensors) ]
if len(tensors[0]) > 0:
yield tensors
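# Doctest-style illustration of yield_batch with assumed toy data (not executed here):
#   >>> rows = [(1, 'a'), (2, 'b'), (3, 'c')]
#   >>> list(yield_batch(iter(rows), batch_size=2, num_tensors=2))
#   [[[1, 2], ['a', 'b']], [[3], ['c']]]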
|
the-stack_0_12202 | #!/usr/bin/python3
import time
import json
import sys
import random
from neopixel import *
from dynamic_pattern_list_builder import *
# LED strip configuration:
LED_COUNT = 24 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
pulse_on = Color(255, 255, 255)
pulse_off = Color(0, 0, 0)
heartbeat_pulse = 3
heartbeat_gap = 0.07 # gap between beats
# Dictionary containing object positions
patterns = {
'elevation' : [1, 2, 3],
'distance' : [10, 15, 20, 25],
'direction' : [[0, 45, 90, 135, 180, 225, 270, 315],[315, 270, 225, 180, 135, 90, 45, 0],[0, 45, 90, 135, 180, 225, 270, 315]],
'pin_out' : [[0,1,2,3,4,5,6,7],[8,9,10,11,12,13,14,15],[16,17,18,19,20,21,22,23]]
}
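# Illustrative lookup: an object at elevation 2, 15 ft away, bearing 90 degrees maps to
# patterns['pin_out'][2 - 1][90 // 45] == 10, i.e. LED index 10 on the middle ring.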
# global list declarations and class initialization
dynamicPattern = Dynamic_pattern_list_builder() # class initialization
pat = dynamicPattern.pattern_builder() # dynamic_pattern declaration
randNumList = [] # list of random numbers
visitedPattern = [] # list of visited patterns
dList = [] # list of keys
# create list of dictionary keys
for i in pat:
dList.append(i)
# json handler to read in dictionary of dynamic patterns
#f = open('dynamic_pattern_list.json', 'r')
#fin = json.load(f)
#f.close()
#for i in fin:
# print(fin['dynamic patterns'])
# creates the heartbeat pulse
# handles 10, 15, 20 feet heartbeat patterns
# if 25 feet, creates sonar pulse
def heart_beat(strip, elevation, distance, direction):
    pix = patterns.get('pin_out')[elevation-1][direction//45]
beat = 0
if (distance == 10):
beat = 0.300
elif (distance == 15):
beat = 0.650
elif (distance == 20):
beat = 1.000
elif (distance == 25):
beat = 1.00
    heart_gap = 0.5
    # sonar pulse for 25 feet
    if (distance == 25):
        for i in range(heartbeat_pulse):
            strip.setPixelColor(pix, pulse_on)
            strip.show()
            time.sleep(heart_gap)
            strip.setPixelColor(pix, pulse_off)
            strip.show()
            time.sleep(beat)
        return
    # Heartbeat pattern for 10 through 20 feet
    for x in range(heartbeat_pulse):
        strip.setPixelColor(pix, pulse_on)
        strip.show()
        time.sleep(heartbeat_gap)
        strip.setPixelColor(pix, pulse_off)
        strip.show()
        time.sleep(heartbeat_gap)
        strip.setPixelColor(pix, pulse_on)
        strip.show()
        time.sleep(heartbeat_gap)
        strip.setPixelColor(pix, pulse_off)
        strip.show()
        time.sleep(beat)
# handler for dynamic patterns
# calls dynamic_pattern_list_builder.py
# randomly selects a dynamic pattern and calls all the beats to simulate that pattern
def dynamic_pattern_handler(strip):
    while (len(randNumList) < 23):
        rNum = random.randint(0, 22)
        if (rNum not in randNumList):
            randNumList.append(rNum)
            dBeat = dList[rNum]
            visitedPattern.append(dBeat)
for dPat in visitedPattern:
print(dPat)
for beat in pat.get(dPat):
elevation = beat[0]
distance = beat[1]
direction = beat[2]
print ('elevation: ' + str(elevation) + ' ' + 'distance: ' + str(distance) + ' ' + 'direction: ' + str(direction))
heart_beat(strip, elevation, distance, direction)
if __name__ == '__main__':
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Initialize the library (must be called once before other functions).
strip.begin()
print ('Press Ctrl-C to quit.')
try:
dynamic_pattern_handler(strip)
except KeyboardInterrupt:
        # clear all LEDs on exit (colorWipe is not defined or imported in this script)
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, Color(0, 0, 0))
        strip.show()
print("Goodbye World")
|
the-stack_0_12203 | #
# Copyright (c) 2021 Cisco Systems, Inc and its affiliates
# All rights reserved
#
from msxswagger import DocumentationConfig, Security, Sso
from config import Config
from helpers.consul_helper import ConsulHelper
class SwaggerHelper(object):
def __init__(self, config: Config, consul_helper: ConsulHelper):
self._config = config
self._consul_helper = consul_helper
def get_documentation_config(self):
sso_url = self._consul_helper.get_string(
key=f"{self._config.config_prefix}/defaultapplication/swagger.security.sso.baseUrl",
default=self._config.swagger.ssourl)
client_id = self._consul_helper.get_string(
key=f"{self._config.config_prefix}/helloworldservice/public.security.clientId",
default=self._config.swagger.clientid)
return DocumentationConfig(
root_path='/helloworld',
security=Security(
enabled=self._config.swagger.secure,
sso=Sso(base_url=sso_url, client_id=client_id)))
def get_swagger_resource(self):
return self._config.swagger.swaggerjsonpath
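# Minimal usage sketch (assumes Config and ConsulHelper instances wired up elsewhere):
#   helper = SwaggerHelper(config, consul_helper)
#   documentation_config = helper.get_documentation_config()
#   swagger_json_path = helper.get_swagger_resource()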
|
the-stack_0_12204 | def main():
import sys
import signal
import argparse
import json
from edman import DB
from scripts.action import Action
# Ctrl-Cを押下された時の対策
signal.signal(signal.SIGINT, lambda sig, frame: sys.exit('\n'))
# コマンドライン引数処理
parser = argparse.ArgumentParser(description='ドキュメントの項目を修正するスクリプト')
# parser.add_argument('-c', '--collection', help='collection name.')
parser.add_argument('objectid', help='objectid str.')
parser.add_argument('amend_file', type=open, help='JSON file.')
parser.add_argument('structure', help='Select ref or emb.')
parser.add_argument('-i', '--inifile', help='DB connect file path.')
# 引数を付けなかった場合はヘルプを表示して終了する
if len(sys.argv) == 1:
parser.parse_args(["-h"])
sys.exit(0)
args = parser.parse_args()
# 構造はrefかembのどちらか
if not (args.structure == 'ref' or args.structure == 'emb'):
parser.error("structure requires 'ref' or 'emb'.")
try:
# iniファイル読み込み
con = Action.reading_config_file(args.inifile)
# ファイル読み込み
try:
amend_data = json.load(args.amend_file)
except json.JSONDecodeError:
            sys.exit('File is not JSON format.')
except IOError:
sys.exit('file read error.')
# DB接続
db = DB(con)
# 対象oidの所属コレクションを自動的に取得 ※動作が遅い場合は使用しないこと
collection = db.find_collection_from_objectid(args.objectid)
# アップデート処理
if db.update(collection, args.objectid, amend_data, args.structure):
print('アップデート成功')
else:
print('アップデート失敗')
except Exception as e:
tb = sys.exc_info()[2]
sys.stderr.write(f'{type(e).__name__}: {e.with_traceback(tb)}\n')
sys.exit(1)
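# Invocation sketch (all values are placeholders, not taken from this repository):
#   python update_doc.py 5f43a1b2c3d4e5f6a7b8c9d0 amend.json ref -i db.ini
# where amend.json holds the replacement items for the target document, e.g.
#   {"title": "new title", "count": 2}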
if __name__ == "__main__":
main() |
the-stack_0_12205 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The context of mindspore, used to configure the current execution environment,
includes the execution mode, execution backend and other feature switches.
"""
import json
import os
import time
import threading
from collections import namedtuple
from types import FunctionType
from mindspore import log as logger
from mindspore._c_expression import MSContext, ms_ctx_param
from mindspore._checkparam import args_type_check, Validator
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
_reset_auto_parallel_context
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context
from .default_config import __device_target__, __package_name__
__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
'get_ps_context', 'reset_ps_context']
GRAPH_MODE = 0
PYNATIVE_MODE = 1
_DEVICE_APP_MEMORY_SIZE = 31 # The max memory size of graph plus variable.
_re_pattern = r'[1-9][0-9]*(\.)?[0-9]*GB|0\.[0-9]*GB'
_k_context = None
def _make_directory(path):
"""Make directory."""
real_path = None
if path is None or not isinstance(path, str) or path.strip() == "":
raise ValueError(f"Input path `{path}` is invalid type")
# convert the relative paths
path = os.path.realpath(path)
logger.debug("The absolute path is %r", path)
# check whether the path is already existed and has written permissions
if os.path.exists(path):
real_path = path
else:
# All exceptions need to be caught because create directory maybe have some limit(permissions)
logger.debug("The directory(%s) doesn't exist, will create it", path)
try:
os.makedirs(path)
real_path = path
except PermissionError as e:
logger.error(f"No write permission on the directory `{path}, error = {e}")
raise ValueError(f"No write permission on the directory `{path}`.")
return real_path
def _get_print_file_name(file_name):
"""Add timestamp suffix to file name. Rename the file name: file_name + "." + time(seconds)."""
time_second = str(int(time.time()))
file_name = file_name + "." + time_second
if os.path.exists(file_name):
        raise ValueError("This file {} already exists.".format(file_name))
return file_name
class _ThreadLocalInfo(threading.local):
"""
Thread local Info used for store thread local attributes.
"""
def __init__(self):
super(_ThreadLocalInfo, self).__init__()
self._reserve_class_name_in_scope = True
@property
def reserve_class_name_in_scope(self):
"""Gets whether to save the network class name in the scope."""
return self._reserve_class_name_in_scope
@reserve_class_name_in_scope.setter
def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
"""Sets whether to save the network class name in the scope."""
if not isinstance(reserve_class_name_in_scope, bool):
raise ValueError(
"Set reserve_class_name_in_scope value must be bool!")
self._reserve_class_name_in_scope = reserve_class_name_in_scope
_ContextRecord = namedtuple(
"_ContextRecord", ["is_pynative_mode", "switch_context_fn"])
class _ContextSwitchInfo(threading.local):
"""
Record of context switch information.
Args:
is_pynative (bool): Whether to adopt the PyNative mode.
"""
def __init__(self, is_pynative):
super(_ContextSwitchInfo, self).__init__()
self.context_stack = []
if is_pynative:
self.push(True, None)
def push(self, is_pynative, switch_context_fn):
"""
Push a context switch record onto the stack.
Args:
is_pynative (bool): Whether context switch to PyNative mode.
switch_context_fn (Function): A callable that executes the context switch.
"""
if isinstance(switch_context_fn, FunctionType):
switch_context_fn()
self.context_stack.append(
_ContextRecord(is_pynative, switch_context_fn))
def pop(self):
self.context_stack.pop()
class _Context:
"""
_Context is the environment in which operations are executed
Note:
Create a context through instantiating Context object is not recommended.
should use context() to get the context since Context is singleton.
"""
_instance = None
_instance_lock = threading.Lock()
def __init__(self):
self._thread_local_info = _ThreadLocalInfo()
self._context_switches = _ContextSwitchInfo(True)
self._context_handle = MSContext.get_instance()
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance_lock.acquire()
cls._instance = object.__new__(cls)
cls._instance_lock.release()
return cls._instance
def __getattribute__(self, attr):
value = object.__getattribute__(self, attr)
if attr == "_context_handle" and value is None:
raise ValueError("Context handle is none in context!!!")
return value
def get_param(self, param):
return self._context_handle.get_param(param)
def set_param(self, param, value):
self._context_handle.set_param(param, value)
def set_mode(self, mode):
"""
Switch between Graph mode and PyNative mode.
Args:
mode (int): GRAPH_MODE or PYNATIVE_MODE.
"""
if mode == PYNATIVE_MODE:
if self.enable_debug_runtime:
self.set_backend_policy("vm")
self._context_switches.push(True, None)
elif mode == GRAPH_MODE:
if self.enable_debug_runtime:
self.set_backend_policy("ge")
self._context_switches.push(False, None)
else:
raise ValueError(f'The execution mode {mode} is invalid!')
self.set_param(ms_ctx_param.mode, mode)
def set_backend_policy(self, policy):
success = self._context_handle.set_backend_policy(policy)
if not success:
raise RuntimeError("Backend policy must be one of ge, vm, ms.")
def set_save_graphs_path(self, save_graphs_path):
self.set_param(ms_ctx_param.save_graphs_path, _make_directory(save_graphs_path))
def set_device_target(self, target):
valid_targets = ["CPU", "GPU", "Ascend", "Davinci"]
if not target in valid_targets:
raise ValueError(f"Target device name {target} is invalid! It must be one of {valid_targets}")
if target == "Davinci":
target = "Ascend"
self.set_param(ms_ctx_param.device_target, target)
if self.enable_debug_runtime and target == "CPU":
self.set_backend_policy("vm")
def set_auto_tune_mode(self, tune_mode):
candidate = ["NO_TUNE", "RL", "GA", "RL,GA", "GA,RL"]
if tune_mode in candidate:
self.set_param(ms_ctx_param.tune_mode, tune_mode)
else:
raise ValueError(f"Tune mode must be in ['NO_TUNE', 'RL', 'GA', 'RL,GA', 'GA,RL'], but got {tune_mode}")
def set_device_id(self, device_id):
if device_id < 0 or device_id > 4095:
raise ValueError(f"Device id must be in [0, 4095], but got {device_id}")
self.set_param(ms_ctx_param.device_id, device_id)
def set_max_call_depth(self, max_call_depth):
if max_call_depth <= 0:
raise ValueError(f"Max call depth must be greater than 0, but got {max_call_depth}")
self.set_param(ms_ctx_param.max_call_depth, max_call_depth)
def set_profiling_options(self, option):
if not isinstance(option, str):
raise TypeError("The parameter option must be str.")
self.set_param(ms_ctx_param.profiling_options, option)
def set_variable_memory_max_size(self, variable_memory_max_size):
"""set values of variable_memory_max_size and graph_memory_max_size"""
if not Validator.check_str_by_regular(variable_memory_max_size, _re_pattern):
raise ValueError("Context param variable_memory_max_size should be in correct format! Such as \"5GB\"")
if int(variable_memory_max_size[:-2]) > _DEVICE_APP_MEMORY_SIZE:
raise ValueError("Context param variable_memory_max_size should be not greater than 31GB.")
variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
# pylint: disable=protected-access
self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)
def set_max_device_memory(self, max_device_memory):
if not Validator.check_str_by_regular(max_device_memory, _re_pattern):
raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
max_device_memory_value = float(max_device_memory[:-2])
if max_device_memory_value == 0:
raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
self.set_param(ms_ctx_param.max_device_memory, max_device_memory_value)
def set_print_file_path(self, file_path):
"""Add timestamp suffix to file name. Sets print file path."""
print_file_path = os.path.realpath(file_path)
if os.path.isdir(print_file_path):
raise IOError("Print_file_path should be file path, but got {}.".format(file_path))
if os.path.exists(print_file_path):
_path, _file_name = os.path.split(print_file_path)
path = _make_directory(_path)
file_name = _get_print_file_name(_file_name)
full_file_name = os.path.join(path, file_name)
else:
full_file_name = print_file_path
self.set_param(ms_ctx_param.print_file_path, full_file_name)
def set_env_config_path(self, env_config_path):
"""Check and set env_config_path."""
if not self._context_handle.enable_dump_ir():
raise ValueError("The 'env_config_path' is not supported, please enable ENABLE_DUMP_IR "
"with '-D on' and recompile source.")
env_config_path = os.path.realpath(env_config_path)
if not os.path.isfile(env_config_path):
raise ValueError("The %r set by 'env_config_path' should be an existing json file." % env_config_path)
try:
with open(env_config_path, 'r') as f:
json.load(f)
except (TypeError, ValueError) as exo:
raise ValueError("The %r set by 'env_config_path' should be a json file. "
"Detail: %s." % (env_config_path, str(exo)))
self.set_param(ms_ctx_param.env_config_path, env_config_path)
setters = {
'mode': set_mode,
'save_graphs_path': set_save_graphs_path,
'device_target': set_device_target,
'device_id': set_device_id,
'auto_tune_mode': set_auto_tune_mode,
'max_call_depth': set_max_call_depth,
'profiling_options': set_profiling_options,
'variable_memory_max_size': set_variable_memory_max_size,
'max_device_memory': set_max_device_memory,
'print_file_path': set_print_file_path,
'env_config_path': set_env_config_path
}
@property
def reserve_class_name_in_scope(self):
"""Gets whether to save the network class name in the scope."""
return self._thread_local_info.reserve_class_name_in_scope
@reserve_class_name_in_scope.setter
def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
"""Sets whether to save the network class name in the scope."""
self._thread_local_info.reserve_class_name_in_scope = reserve_class_name_in_scope
@property
def enable_ge(self):
return self._context_handle.get_backend_policy() == 'ge'
@property
def enable_debug_runtime(self):
return self._thread_local_info.debug_runtime
@enable_debug_runtime.setter
def enable_debug_runtime(self, enable):
thread_info = self._thread_local_info
thread_info.debug_runtime = enable
def _context():
"""
Get the global _context, if context is not created, create a new one.
Returns:
_Context, the global context in PyNative mode.
"""
global _k_context
if _k_context is None:
default_backend = 'debug'
try:
from mindspore import default_config
default_backend = default_config.__backend__
except ImportError:
logger.error("import default config fail")
_k_context = _Context()
_k_context.enable_debug_runtime = False
if default_backend == 'debug':
_k_context.enable_debug_runtime = True
default_backend = 'vm'
_k_context.set_backend_policy(default_backend)
return _k_context
@args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
auto_parallel_search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool,
all_reduce_fusion_config=list, pipeline_stages=int)
def set_auto_parallel_context(**kwargs):
r"""
Set auto parallel context, which is valid only for Ascend and GPU target.
Auto parallel context should be configured before the initialization of your network.
Note:
Attribute name is required for setting attributes.
If a program has tasks with different parallel modes, then before setting new parallel mode for the
next task, interface mindspore.context.reset_auto_parallel_context() needs to be called to reset
the configuration.
Setting or changing parallel modes must be called before any creating Initializer, otherwise,
RuntimeError may be raised when compiling the network.
Some configurations are parallel mode specific, see the below table for details:
=========================== ===========================
Common AUTO_PARALLEL
=========================== ===========================
device_num gradient_fp32_sync
global_rank loss_repeated_mean
gradients_mean auto_parallel_search_mode
parallel_mode strategy_ckpt_load_file
all_reduce_fusion_config strategy_ckpt_save_file
enable_parallel_optimizer full_batch
\ pipeline_stages
=========================== ===========================
Args:
device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
gradients_mean (bool): Whether to perform mean operator after allreduce of gradients.
"stand_alone" do not support gradients_mean. Default: False.
gradient_fp32_sync (bool): Run allreduce of gradients in fp32.
"stand_alone", "data_parallel" and "hybrid_parallel" do not support
gradient_fp32_sync. Default: True.
parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
"hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Default: "stand_alone".
- stand_alone: Only one processor is working.
- data_parallel: Distributes the data across different processors.
- hybrid_parallel: Achieves data parallelism and model parallelism manually.
- semi_auto_parallel: Achieves data parallelism and model parallelism by
setting parallel strategies.
- auto_parallel: Achieving parallelism automatically.
auto_parallel_search_mode (str): There are two kinds of shard strategy search modes, "recursive_programming"
and "dynamic_programming". Default: "dynamic_programming".
- recursive_programming: Recursive programming search mode.
- dynamic_programming: Dynamic programming search mode.
parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
the same network initialization parameter values for all devices, broadcast the parameters
            on device 0 to other devices. Parameter broadcasting differs between parallel modes: in
            data_parallel mode, all parameters are broadcast except for the parameter whose attribute
            layerwise_parallel is True; in hybrid_parallel, semi_auto_parallel and auto_parallel mode, the
            segmented parameters do not participate in broadcasting. Default: False.
strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. Default: ''
full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
should be set with True. Default: False.
enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
`Lamb` and `AdamWeightDecay` in Ascend . Default: False.
all_reduce_fusion_config (list): Set allreduce fusion strategy by parameters indices. Only support ReduceOp.SUM
and HCCL_WORLD_GROUP/NCCL_WORLD_GROUP. No Default, if it is not set, the fusion is closed.
pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how
            the devices are distributed along the pipeline. The total devices will be divided into
            'pipeline_stages' stages. This currently could only be used when
parallel mode semi_auto_parallel is enabled. Default: 1.
Raises:
ValueError: If input key is not attribute in auto parallel context.
Examples:
>>> context.set_auto_parallel_context(device_num=8)
>>> context.set_auto_parallel_context(global_rank=0)
>>> context.set_auto_parallel_context(gradients_mean=True)
>>> context.set_auto_parallel_context(gradient_fp32_sync=False)
>>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
>>> context.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
>>> context.set_auto_parallel_context(parameter_broadcast=False)
>>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
>>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
>>> context.set_auto_parallel_context(full_batch=True)
>>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
>>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
>>> context.set_auto_parallel_context(pipeline_stages=2)
"""
_set_auto_parallel_context(**kwargs)
def get_auto_parallel_context(attr_key):
"""
Gets auto parallel context attribute value according to the key.
Args:
attr_key (str): The key of the attribute.
Returns:
Returns attribute value according to the key.
Raises:
ValueError: If input key is not attribute in auto parallel context.
"""
return _get_auto_parallel_context(attr_key)
def reset_auto_parallel_context():
"""
Reset auto parallel context attributes to the default values:
- device_num: 1.
- global_rank: 0.
- gradients_mean: False.
- gradient_fp32_sync: True.
- parallel_mode: 'stand_alone'.
- auto_parallel_search_mode: 'dynamic_programming'.
- parameter_broadcast: False.
- strategy_ckpt_load_file: ''.
- strategy_ckpt_save_file: ''.
- full_batch: False.
- enable_parallel_optimizer: False.
- pipeline_stages: 1.
"""
_reset_auto_parallel_context()
def _check_target_specific_cfgs(device, arg_key):
"""Checking whether a config is suitable for a specified device"""
device_cfgs = {
'enable_auto_mixed_precision': ['Ascend'],
'enable_dump': ['Ascend'],
'save_dump_path': ['Ascend'],
'enable_graph_kernel': ['Ascend', 'GPU'],
'enable_reduce_precision': ['Ascend'],
'enable_profiling': ['Ascend'],
'profiling_options': ['Ascend'],
'print_file_path': ['Ascend'],
'variable_memory_max_size': ['Ascend'],
'auto_tune_mode': ['Ascend'],
'max_device_memory': ['GPU']
}
# configs not in map device_cfgs are supposed to be suitable for all devices
if not arg_key in device_cfgs:
return True
supported_devices = device_cfgs[arg_key]
if device in supported_devices:
return True
logger.warning(f"Config '{arg_key}' only supports devices in {supported_devices}, current device is '{device}'"
", ignore it.")
return False
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
save_graphs_path=str, enable_dump=bool, auto_tune_mode=str,
save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
enable_profiling=bool, profiling_options=str, enable_auto_mixed_precision=bool,
enable_graph_kernel=bool, check_bprop=bool, max_device_memory=str, print_file_path=str,
enable_sparse=bool, max_call_depth=int, env_config_path=str)
def set_context(**kwargs):
"""
Sets context for running environment.
Context should be configured before running your program. If there is no configuration,
the "Ascend" device target will be used by default. GRAPH_MODE or
PYNATIVE_MODE can be set by `mode` attribute and both modes support all backends, default
mode is PYNATIVE_MODE.
When the `save_graphs` attribute is set to True, attribute of `save_graphs_path` is used to set the
intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
For other configurations and arguments, please refer to the corresponding module
description, the configuration is optional and can be enabled when needed.
Note:
Attribute name is required for setting attributes.
The mode is not recommended to be changed after net was initialized because the implementations of some
operations are different in graph mode and pynative mode. Default: PYNATIVE_MODE.
Some configurations are device specific, see the below table for details:
=========================== =========================== =================
Common(CPU/GPU/Ascend) Ascend GPU
=========================== =========================== =================
check_bprop print_file_path max_device_memory
device_id enable_dump enable_graph_kernel
device_target save_dump_path
enable_sparse enable_graph_kernel
max_call_depth enable_reduce_precision
mode enable_profiling
reserve_class_name_in_scope profiling_options
save_graphs variable_memory_max_size
save_graphs_path auto_tune_mode
env_config_path
grad_for_scalar
=========================== =========================== =================
Args:
mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: PYNATIVE_MODE(1).
device_target (str): The target device to run, support "Ascend", "GPU", and "CPU". Default: "Ascend".
device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
while device_num_per_host should be no more than 4096. Default: 0.
save_graphs (bool): Whether to save graphs. Default: False.
save_graphs_path (str): Path to save graphs. Default: ".".
If the program is executed in the parallel mode, `save_graphs_path` should consist of the path and the
current device id, to ensure that writing file conflicts won't happen when the different processes try to
create the files in the same directory. For example, the `device_id` can be generated by
`device_id = os.getenv("DEVICE_ID")` and the `save_graphs_path` can be set by
`context.set_context(save_graphs_path="path/to/ir/files"+device_id)`.
enable_graph_kernel (bool): Whether to enable composition of basic primitives. These primitives would be
compiled into a fused kernel automatically. Default: False.
reserve_class_name_in_scope (bool) : Whether to save the network class name in the scope. Default: True.
enable_reduce_precision (bool): Whether to enable precision reduction. Default: True.
enable_dump (bool): Whether to enable dump. Default: False.
save_dump_path (str): When the program is executed on Ascend, operators can dump data in this path.
The root dump path is configured in /home/HwHiAiUser/ide_daemon/ide_daemon.cfg.
So the real dump path is "{configured root dump path}/{`save_dump_path`}". Default: ".".
variable_memory_max_size (str): Set the maximum size of the variable memory max size. Default: "0GB".
enable_profiling (bool): Whether to open profiling. Default: False.
        profiling_options (str): Set profiling collection options, operators can profile data here.
The values of profiling collection options are as follows, supporting the collection of multiple data.
        - output: the saving path of the profiling collection result file. The directory specified by this
          parameter needs to be created in advance on the training environment (container or host side) and ensure
          that the running user configured during installation has read and write permissions. It supports the
          configuration of absolute or relative paths (relative to the current path when executing the command line).
          The absolute path configuration starts with '/', for example: /home/data/output.
          The relative path configuration directly starts with the directory name, for example: output.
- training_trace: collect iterative trajectory data, that is, the training task and software information of
the AI software stack, to achieve performance analysis of the training task, focusing on data
enhancement, forward and backward calculation, gradient aggregation update and other related data.
The value is on/off.
- task_trace: collect task trajectory data, that is, the hardware information of the HWTS/AICore of
the Ascend 910 processor, and analyze the information of beginning and ending of the task.
The value is on/off.
- aicpu: collect profiling data enhanced by aicpu data. The value is on/off.
- fp_point: specify the start position of the forward operator of the training network iteration trajectory,
          which is used to record the start timestamp of the forward calculation. The configuration value is the name
          of the first operator specified in the forward direction. When the value is empty, the system will
automatically obtain the forward operator name.
- bp_point: specify the end position of the iteration trajectory reversal operator of the training network,
record the end timestamp of the backward calculation. The configuration value is the name of the operator
          after the specified reverse. When the value is empty, the system will automatically obtain the backward
operator name.
- aic_metrics: the values are as follows:
ArithmeticUtilization: percentage statistics of various calculation indicators.
          PipeUtilization: the time-consuming ratio of calculation unit and handling unit, this item is
          the default value.
Memory: percentage of external memory read and write instructions.
MemoryL0: percentage of internal memory read and write instructions.
ResourceConflictRatio: proportion of pipline queue instructions.
          The profiling_options is like '{"output":"/home/data/output","training_trace":"on"}'
check_bprop (bool): Whether to check bprop. Default: False.
max_device_memory (str): Sets the maximum memory available for devices.
Currently, it is only supported on GPU. The format is "xxGB". Default: "1024GB".
print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
a file by default, and turns off printing to the screen. If the file already exists, add a timestamp
suffix to the file. Default: ''.
enable_sparse (bool): Whether to enable sparsity feature. Default: False.
max_call_depth (int): Specify the maximum depth of function call. Default: 1000.
env_config_path (str): Config path for DFX.
auto_tune_mode (str): The mode of auto tune when op building, get the best tiling performance,
default: NO_TUNE. The value must be in ['RL', 'GA', 'RL,GA'].
RL: rl_tune;
GA: ga_tune;
RL,GA: rl_tune/ga_tune(Automatic selection).
        - rl_tune: Reinforcement Learning tune.
- ga_tune: Genetic Algorithm tune.
grad_for_scalar (bool): Whether to get gradient for scalar. Default: False.
Raises:
ValueError: If input key is not an attribute in context.
Examples:
>>> context.set_context(mode=context.GRAPH_MODE)
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> context.set_context(device_target="Ascend")
>>> context.set_context(device_id=0)
>>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> context.set_context(enable_reduce_precision=True)
>>> context.set_context(enable_dump=True, save_dump_path=".")
>>> context.set_context(reserve_class_name_in_scope=True)
>>> context.set_context(variable_memory_max_size="6GB")
>>> context.set_context(mode=context.GRAPH_MODE,
... device_target="Ascend",device_id=0, save_graphs=True,
... save_graphs_path="/mindspore")
>>> context.set_context(enable_profiling=True,
... profiling_options='{"output":"/home/data/output","training_trace":"on"}')
>>> context.set_context(max_device_memory="3.5GB")
>>> context.set_context(print_file_path="print.pb")
>>> context.set_context(max_call_depth=80)
>>> context.set_context(env_config_path="./env_config.json")
"""
ctx = _context()
# set device target first
if 'device_target' in kwargs:
ctx.set_device_target(kwargs['device_target'])
device = ctx.get_param(ms_ctx_param.device_target)
if not device.lower() in __device_target__:
raise ValueError(f"Error, package type {__package_name__} support device type {__device_target__}, "
f"but got device target {device}")
device = ctx.get_param(ms_ctx_param.device_target)
for key, value in kwargs.items():
if not _check_target_specific_cfgs(device, key):
continue
if hasattr(ctx, key):
setattr(ctx, key, value)
continue
if key in ctx.setters:
ctx.setters[key](ctx, value)
continue
# enum variables beginning with '_' are for internal use
if key in ms_ctx_param.__members__ and key[0] != '_':
ctx.set_param(ms_ctx_param.__members__[key], value)
continue
raise ValueError("Set context keyword %s is not recognized!" % key)
def get_context(attr_key):
"""
Gets context attribute value according to the input key.
Args:
attr_key (str): The key of the attribute.
Returns:
Object, The value of given attribute key.
Raises:
ValueError: If input key is not an attribute in context.
"""
ctx = _context()
device = ctx.get_param(ms_ctx_param.device_target)
_ = _check_target_specific_cfgs(device, attr_key)
if hasattr(ctx, attr_key):
return getattr(ctx, attr_key)
# enum variables beginning with '_' are for internal use
if attr_key in ms_ctx_param.__members__ and attr_key[0] != '_':
return ctx.get_param(ms_ctx_param.__members__[attr_key])
raise ValueError("Get context keyword %s is not recognized!" % attr_key)
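# Hedged round-trip illustration (whether a given device target can be set depends on the
# installed package build):
#   context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
#   assert context.get_context("mode") == context.GRAPH_MODE
#   assert context.get_context("device_target") == "CPU"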
class ParallelMode:
"""
Parallel mode options.
There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
"HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".
- STAND_ALONE: Only one processor is working.
- DATA_PARALLEL: Distributes the data across different processors.
- HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
- SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
- AUTO_PARALLEL: Achieves parallelism automatically.
MODE_LIST: The list of all supported parallel modes.
"""
STAND_ALONE = "stand_alone"
DATA_PARALLEL = "data_parallel"
HYBRID_PARALLEL = "hybrid_parallel"
SEMI_AUTO_PARALLEL = "semi_auto_parallel"
AUTO_PARALLEL = "auto_parallel"
MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL, AUTO_PARALLEL]
@args_type_check(enable_ps=bool)
def set_ps_context(**kwargs):
"""
Set parameter server training mode context.
Note:
Some other environment variables should also be set for parameter server training mode.
These environment variables are listed below:
MS_SERVER_NUM # Server number
MS_WORKER_NUM # Worker number
MS_SCHED_HOST # Scheduler IP address
MS_SCHED_PORT # Scheduler port
MS_ROLE # The role of this process:
MS_SCHED #represents the scheduler,
MS_WORKER #represents the worker,
MS_PSERVER #represents the Server
Args:
enable_ps (bool): Whether to enable parameter server training mode.
Only after enable_ps is set True, the environment variables will be effective.
Default: False.
Raises:
ValueError: If input key is not the attribute in parameter server training mode context.
Examples:
>>> context.set_ps_context(enable_ps=True)
"""
_set_ps_context(**kwargs)
def get_ps_context(attr_key):
"""
Get parameter server training mode context attribute value according to the key.
Args:
attr_key (str): The key of the attribute.
Returns:
Returns attribute value according to the key.
Raises:
ValueError: If input key is not attribute in auto parallel context.
"""
return _get_ps_context(attr_key)
def reset_ps_context():
"""
Reset parameter server training mode context attributes to the default values:
- enable_ps: False.
"""
_reset_ps_context()
|
the-stack_0_12207 | import torchvision.transforms as transforms
import torch
from PIL import Image, ImageOps
import random
import utils.utils2.transforms as local_transforms
"""
As mentioned in http://pytorch.org/docs/master/torchvision/models.html
All pre-trained models expect input images normalized in the same way, i.e. mini-batches
of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least
224. The images have to be loaded into a range of [0, 1] and then normalized using
mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].
NOTE: transforms.ToTensor() transforms the incoming data to range of [0, 1]. It also
converts [H x W x C] to [C x H x W], which is expected by PyTorch models.
"""
# For now we will use PyTorch model zoo models
pytorch_zoo_normaliser = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# inception_normaliser = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
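# Minimal usage sketch (assumes a local 'example.jpg' exists); the factory functions
# defined below turn a PIL image into a normalised [C x H x W] float tensor:
#   img = Image.open('example.jpg').convert('RGB')
#   tensor = get_transformer(224)(img)   # torch.Size([3, 224, 224])
#   batch = tensor.unsqueeze(0)          # add a batch dimension for model input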
class MyRandomCrop(object):
def __init__(self, size):
"""
This is a variant of torchvision's RandomCrop. This one pads image only if
the image is smaller than the intended size. Image will be padded to the
right and bottom.
:param size: tuple (width, height)
"""
self.size = size
def __call__(self, img):
width, height = img.size
target_width, target_height = self.size
pad_width = 0
pad_height = 0
do_padding = False
if width < target_width:
pad_width = target_width - width
do_padding = True
if height < target_height:
pad_height = target_height - height
do_padding = True
#
pad = (0, 0, pad_width, pad_height)
if do_padding:
img = ImageOps.expand(img, border=pad, fill=0)
width, height = img.size
if width == target_width and height == target_height:
return img
x1 = random.randint(0, width - target_width)
y1 = random.randint(0, height - target_height)
return img.crop((x1, y1, x1 + target_width, y1 + target_height))
def get_transformer_crop(crop_img_size, # 224 or more expected by PyTorch model zoo
scale_img_size,
normaliser = pytorch_zoo_normaliser,
do_augment=False):
if do_augment:
# This is a augmented transformation,
return transforms.Compose([transforms.Scale(scale_img_size),
MyRandomCrop((crop_img_size, crop_img_size)),
transforms.RandomHorizontalFlip(),
local_transforms.ColorJitter(0.4, 0.4, 0.4, 0),
# TODO - Add more transformations
transforms.ToTensor(),
normaliser])
else:
# This is a vanilla transformation
return transforms.Compose([transforms.Scale(scale_img_size),
MyRandomCrop((crop_img_size, crop_img_size)),
transforms.ToTensor(),
normaliser])
def get_transformer(img_size, # 224 or more expected by PyTorch model zoo
normaliser = pytorch_zoo_normaliser,
do_augment=False):
if do_augment:
# This is a augmented transformation,
return transforms.Compose([transforms.Scale((img_size, img_size)),
transforms.RandomHorizontalFlip(),
local_transforms.ColorJitter(0.4, 0.4, 0.4, 0),
transforms.ToTensor(),
normaliser])
else:
# This is a vanilla transformation
return transforms.Compose([transforms.Scale((img_size, img_size)),
transforms.ToTensor(),
normaliser])
def get_test_valid_transformer_crop(crop_img_size,
scale_img_size,
normaliser=pytorch_zoo_normaliser):
"""Transformation for Validation and Test set"""
# TODO, implement TTA
# NOTE: With the below logic, one might want to do multiple inference on the same
# image, because there is some randomness, we do not know how big the image is
return transforms.Compose([transforms.Resize(scale_img_size),
MyRandomCrop((crop_img_size, crop_img_size)),
transforms.ToTensor(),
normaliser])
def get_test_valid_transformer(img_size,
normaliser=pytorch_zoo_normaliser):
"""Transformation for Validation and Test set"""
# TODO, implement TTA
# NOTE: With the below logic, one might want to do multiple inference on the same
# image, because there is some randomness, we do not know how big the image is
return transforms.Compose([transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
normaliser]) |
the-stack_0_12208 | import unittest
import endurox as e
import exutils as u
class TestTpencrypt(unittest.TestCase):
# Test data encryption
def test_tpencrypt_ok(self):
w = u.NdrxStopwatch()
while w.get_delta_sec() < u.test_duratation():
# binary data:
buf=e.tpencrypt(b'\x00\x01\xff')
self.assertNotEqual(buf, b'\x00\x01\xff')
buf_org=e.tpdecrypt(buf)
self.assertEqual(buf_org, b'\x00\x01\xff')
# string based:
buf=e.tpencrypt("HELLO WORLD")
self.assertNotEqual(buf, "HELLO WORLD")
buf_org=e.tpdecrypt(buf)
self.assertEqual(buf_org, "HELLO WORLD")
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12210 | import time, datetime
class Sensor():
def __init__(self, bme680):
self.sensor = bme680.BME680();
def initialise(self,bme680):
self.sensor.set_humidity_oversample(bme680.OS_2X)
self.sensor.set_pressure_oversample(bme680.OS_4X)
self.sensor.set_temperature_oversample(bme680.OS_8X)
self.sensor.set_filter(bme680.FILTER_SIZE_3)
self.sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
self.sensor.set_gas_heater_temperature(320)
self.sensor.set_gas_heater_duration(150)
self.sensor.select_gas_heater_profile(0)
def getGasSensorBaseline(self):
start_time = time.time()
curr_time = time.time()
# takes 5 minutes to complete
burn_in_time = 300
burn_in_data = []
print("Collecting gas resistance burn-in data for 5 mins\n")
while curr_time - start_time < burn_in_time:
curr_time = time.time()
if self.sensor.get_sensor_data() and self.sensor.data.heat_stable:
gas = self.sensor.data.gas_resistance
burn_in_data.append(gas)
print("Gas: {0} Ohms".format(gas))
time.sleep(1)
gas_baseline = sum(burn_in_data[-50:]) / 50.0
#print("Gas baseline: {0} Ohms, humidity baseline: {1:.2f} %RH\n".format(gas_baseline, hum_baseline))
return gas_baseline;
def getAirQualityScore(self, gas_baseline):
gas = self.sensor.data.gas_resistance
gas_offset = gas_baseline - gas
# Set the humidity baseline to 40%, an optimal indoor humidity.
hum_baseline = 40.0
# This sets the balance between humidity and gas reading in the
# calculation of air_quality_score (25:75, humidity:gas)
hum_weighting = 0.25
hum = self.sensor.data.humidity
hum_offset = hum - hum_baseline
# Calculate hum_score as the distance from the hum_baseline.
if hum_offset > 0:
hum_score = (100 - hum_baseline - hum_offset) / (100 - hum_baseline) * (hum_weighting * 100)
else:
hum_score = (hum_baseline + hum_offset) / hum_baseline * (hum_weighting * 100)
# Calculate gas_score as the distance from the gas_baseline.
if gas_offset > 0:
gas_score = (gas / gas_baseline) * (100 - (hum_weighting * 100))
else:
gas_score = 100 - (hum_weighting * 100)
# Calculate air_quality_score
air_quality_score = hum_score + gas_score
return air_quality_score;
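    # Worked example of the scoring above (illustrative): with humidity exactly at the
    # 40 %RH baseline, hum_score = 40/40 * 25 = 25; with gas resistance equal to the
    # baseline, gas_offset = 0 so gas_score = 100 - 25 = 75, for a total score of 100.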
def getData(self, gas_baseline):
data_dict = {};
output = '';
if self.sensor.get_sensor_data() and self.sensor.data.heat_stable:
data_dict['timestamp'] = datetime.datetime.now().replace(microsecond=0).isoformat();
data_dict['temperature'] = self.sensor.data.temperature;
data_dict['pressure'] = self.sensor.data.pressure;
data_dict['humidity'] = self.sensor.data.humidity;
data_dict['airq'] = self.getAirQualityScore(gas_baseline);
else:
data_dict['timestamp'] = datetime.datetime.now().replace(microsecond=0).isoformat();
data_dict['temperature'] = 0;
data_dict['pressure'] = 0;
data_dict['humidity'] = 0;
data_dict['airq'] = 0;
return data_dict;
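# Hedged usage sketch (not part of the original file); assumes the Pimoroni bme680
# library is installed:
#
#   import bme680
#   sensor = Sensor(bme680)
#   sensor.initialise(bme680)
#   baseline = sensor.getGasSensorBaseline()  # blocks for ~5 minutes of burn-in
#   reading = sensor.getData(baseline)        # dict with timestamp/temperature/pressure/humidity/airq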
|
the-stack_0_12212 | import numpy as np
import pyqtgraph as pg
from scipy import signal
from acconeer_utils.clients.reg.client import RegClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.pg_process import PGProcess, PGProccessDiedException
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
config = get_base_config()
config.sensor = args.sensors
client.setup_session(config)
pg_updater = PGUpdater(config)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_streaming()
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = PresenceDetectionProcessor(config)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_base_config():
config = configs.IQServiceConfig()
config.range_interval = [0.4, 0.8]
config.sweep_rate = 60
config.gain = 0.6
return config
class PresenceDetectionProcessor:
def __init__(self, config):
self.config = config
# Settings
n_dft = 15 # Data length for frequency estimation [s] | 20
t_freq_est = 0.5 # Time between frequency estimations [s] | 2
tau_iq = 0.04 # Time constant low-pass filter on IQ-data [s] | 0.04
        self.f_s = self.config.sweep_rate  # Sweep rate [Hz] | 150
self.D = 124 # Spatial or Range down sampling factor | 124
self.f_low = 0.1 # Lowest frequency of interest [Hz] | 0.1
self.f_high = 1.0 # Highest frequency of interest [Hz] | 1
self.M = int(self.f_s / 10) # Time down sampling for DFT | 40 f_s/M ~ 10 Hz
self.lambda_p = 40 # Threshold: spectral peak to noise ratio [1] | 50
self.lamda_05 = 6 # Threshold: ratio fundamental and half harmonic
self.interpolate = True # Interpolation between DFT points
self.delta_f = 1 / n_dft
self.dft_f_vec = np.arange(self.f_low, self.f_high, self.delta_f)
self.dft_points = np.size(self.dft_f_vec)
# Butterworth bandpass filter
f_n = self.f_s / 2
v_low = self.f_low / f_n
v_high = self.f_high / f_n
self.b, self.a = signal.butter(4, [v_low, v_high], btype="bandpass")
# Exponential lowpass filter
self.alpha_iq = np.exp(-2 / (self.f_s * tau_iq))
self.alpha_phi = np.exp(-2 * self.f_low / self.f_s)
# Parameter init
self.sweeps_in_block = int(np.ceil(n_dft * self.f_s))
self.new_sweeps_per_results = int(np.ceil(t_freq_est * self.f_s))
self.phi_vec = np.zeros((self.sweeps_in_block, 1))
self.f_est_vec = np.zeros(1)
self.f_dft_est_vec = np.zeros(1)
self.snr_vec = 0
self.sweep_index = 0
def process(self, sweep):
if self.sweep_index == 0:
delay_points = int(np.ceil(np.size(sweep) / self.D))
self.data_s_d_mat = np.zeros((self.sweeps_in_block, delay_points), dtype="complex")
self.data_s_d_mat[self.sweep_index, :] = self.downsample(sweep, self.D)
out_data = None
elif self.sweep_index < self.sweeps_in_block:
self.data_s_d_mat[self.sweep_index, :] = self.iq_lp_filter_time(
self.data_s_d_mat[self.sweep_index - 1, :],
self.downsample(sweep, self.D)
)
            self.phi_vec[self.sweep_index] = self.unwrap_phase(
                self.phi_vec[self.sweep_index - 1],
                self.data_s_d_mat[self.sweep_index, :],
                self.data_s_d_mat[self.sweep_index - 1, :]
            )
phi_filt = signal.lfilter(self.b, self.a, self.phi_vec, axis=0)
out_data = {
"phi_raw": self.phi_vec,
"phi_filt": phi_filt,
"power_spectrum": np.zeros(self.dft_points),
"x_dft": np.linspace(self.f_low, self.f_high, self.dft_points),
"f_dft_est_hist": self.f_dft_est_vec,
"f_est_hist": self.f_est_vec,
"f_dft_est": 0,
"f_est": 0,
"f_low": self.f_low,
"f_high": self.f_high,
"snr": 0,
"lambda_p": self.lambda_p,
"dist_range": self.config.range_interval,
"init_progress": round(100 * self.sweep_index / self.sweeps_in_block),
}
else:
# Lowpass filter IQ data downsampled in distance points
self.data_s_d_mat = np.roll(self.data_s_d_mat, -1, axis=0)
self.data_s_d_mat[-1, :] = self.iq_lp_filter_time(
self.data_s_d_mat[-1, :],
self.downsample(sweep, self.D)
)
# Phase unwrapping of IQ data
temp_phi = self.unwrap_phase(
self.phi_vec[-1],
self.data_s_d_mat[-1, :],
self.data_s_d_mat[-2, :]
)
self.phi_vec = np.roll(self.phi_vec, -1, axis=0)
self.phi_vec[-1] = temp_phi
if np.mod(self.sweep_index, self.new_sweeps_per_results - 1) == 0:
# Bandpass filter unwrapped data
phi_filt_vec = signal.lfilter(self.b, self.a, self.phi_vec, axis=0)
P, dft_est, _ = self.dft(self.downsample(phi_filt_vec, self.M))
f_breath_est, _, snr, _ = self.breath_freq_est(P)
self.f_est_vec = np.append(self.f_est_vec, f_breath_est)
self.f_dft_est_vec = np.append(self.f_dft_est_vec, dft_est)
self.snr_vec = np.append(self.snr_vec, snr)
out_data = {
"phi_raw": self.phi_vec,
"phi_filt": phi_filt_vec,
"power_spectrum": P,
"x_dft": np.linspace(self.f_low, self.f_high, self.dft_points),
"f_dft_est_hist": self.f_dft_est_vec,
"f_est_hist": self.f_est_vec,
"f_dft_est": dft_est,
"f_est": f_breath_est,
"f_low": self.f_low,
"f_high": self.f_high,
"snr": snr,
"lambda_p": self.lambda_p,
"dist_range": self.config.range_interval,
"init_progress": None,
}
else:
out_data = None
self.sweep_index += 1
return out_data
def downsample(self, data, n):
return data[::n]
def iq_lp_filter_time(self, state, new_data):
return self.alpha_iq * state + (1 - self.alpha_iq) * new_data
def unwrap_phase(self, phase_lp, data_1, data_2):
return phase_lp * self.alpha_phi + np.angle(np.mean(data_2 * np.conjugate(data_1)))
def dft(self, data):
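        # Evaluates the DFT only on the coarse frequency grid covering the breathing
        # band (f_low..f_high) instead of a full FFT; n_vec is scaled by M because the
        # phase signal has been time down-sampled by that factor.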
data = np.squeeze(data)
n_vec = np.arange(data.size) * self.M
dft = np.exp((2j * np.pi / self.f_s) * np.outer(self.dft_f_vec, n_vec))
P = np.square(np.abs(np.matmul(dft, data)))
idx_f = np.argmax(P)
dft_est = self.dft_f_vec[idx_f]
return P, dft_est, P[idx_f]
def noise_est(self, P):
return np.mean(np.sort(P)[:(self.dft_points//2)-1])
def half_peak_frequency(self, P, f_est):
idx_half = int(f_est / (2 * self.delta_f))
if idx_half < self.f_low:
return 0
else:
return (1 / self.delta_f) * (
(self.dft_f_vec[idx_half+1] - f_est / 2) * P[idx_half]
+ (f_est/2 - self.dft_f_vec[idx_half]) * P[idx_half + 1]
)
def breath_freq_est(self, P):
f_idx = np.argmax(P)
P_peak = P[f_idx]
if self.interpolate:
f_est, P_peak = self.freq_quad_interpolation(P)
else:
f_est = self.dft_f_vec[f_idx]
P_half = self.half_peak_frequency(P, f_est)
if (P_peak < self.lamda_05 * P_half):
f_est = f_est / 2
P_peak = P_half
if self.f_low < f_est < self.f_high and P_peak > self.lambda_p*self.noise_est(P):
f_est_valid = True
else:
f_est_valid = False
f_est = 0
snr = P_peak / self.noise_est(P)
return f_est, P_peak, snr, f_est_valid
def freq_quad_interpolation(self, P):
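        # Standard quadratic (parabolic) peak interpolation on the log-power spectrum:
        # fits a parabola through the peak bin and its two neighbours to obtain a
        # sub-bin estimate of the breathing frequency.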
f_idx = np.argmax(P)
        if 0 < f_idx < P.size - 1 and P.size > 3:
f_est = self.dft_f_vec[f_idx] \
+ self.delta_f / 2 * (
(np.log(P[f_idx+1])-np.log(P[f_idx-1]))
/ (2*np.log(P[f_idx]) - np.log(P[f_idx+1]) - np.log(P[f_idx-1]))
)
P_peak = P[f_idx] + np.exp(
1/8 * np.square(np.log(P[f_idx+1]) - np.log(P[f_idx-1]))
/ (2*np.log(P[f_idx]) - np.log(P[f_idx+1]) - np.log(P[f_idx-1]))
)
if not (self.f_low < f_est < self.f_high):
f_est = 0
else:
f_est = 0
P_peak = 0
return f_est, P_peak
class PGUpdater:
def __init__(self, config):
self.config = config
def setup(self, win):
win.resize(800, 600)
win.setWindowTitle("Acconeer sleep breathing estimation example")
phi_title = "Breathing motion (detection range: {} m to {} m)" \
.format(*self.config.range_interval)
self.phi_plot = win.addPlot(title=phi_title)
self.phi_plot.showGrid(x=True, y=True)
self.phi_plot.setLabel("left", "Amplitude")
self.phi_plot.setLabel("bottom", "Samples")
self.phi_plot.addLegend()
self.filt_phi_curve = self.phi_plot.plot(
pen=example_utils.pg_pen_cycler(0),
name="Filtered",
)
self.raw_phi_curve = self.phi_plot.plot(
pen=example_utils.pg_pen_cycler(1),
name="Raw",
)
win.nextRow()
self.spect_plot = win.addPlot(title="Power spectrum")
self.spect_plot.showGrid(x=True, y=True)
self.spect_plot.setLabel("left", "Power")
self.spect_plot.setLabel("bottom", "Frequency (Hz)")
self.spect_curve = self.spect_plot.plot(pen=example_utils.pg_pen_cycler(1))
self.spect_smax = example_utils.SmoothMax(self.config.sweep_rate / 15)
self.spect_dft_inf_line = pg.InfiniteLine(pen=example_utils.pg_pen_cycler(1, "--"))
self.spect_plot.addItem(self.spect_dft_inf_line)
self.spect_est_inf_line = pg.InfiniteLine(pen=example_utils.pg_pen_cycler(0, "--"))
self.spect_plot.addItem(self.spect_est_inf_line)
self.spect_plot.setXRange(0, 1)
self.spect_plot.setYRange(0, 1)
self.spect_text_item = pg.TextItem("Initiating...", anchor=(0.5, 0.5), color="k")
self.spect_text_item.setPos(0.5, 0.5)
self.spect_plot.addItem(self.spect_text_item)
win.nextRow()
self.fest_plot = win.addPlot(title="Breathing estimation history")
self.fest_plot.showGrid(x=True, y=True)
self.fest_plot.setLabel("left", "Frequency (Hz)")
self.fest_plot.setLabel("bottom", "Samples")
self.fest_plot.addLegend()
self.fest_curve = self.fest_plot.plot(
pen=example_utils.pg_pen_cycler(0),
name="Breathing est.",
)
self.fest_dft_curve = self.fest_plot.plot(
pen=example_utils.pg_pen_cycler(1),
name="DFT est.",
)
self.fest_plot.setXRange(0, 1)
self.fest_plot.setYRange(0, 1.2)
self.fest_text_item = pg.TextItem(anchor=(0, 0), color="k")
self.fest_text_item.setPos(0, 1.2)
self.fest_plot.addItem(self.fest_text_item)
def update(self, data):
self.filt_phi_curve.setData(np.squeeze(data["phi_filt"]))
self.raw_phi_curve.setData(np.squeeze(data["phi_raw"]))
if data["init_progress"] is not None:
self.spect_text_item.setText("Initiating: {} %".format(data["init_progress"]))
else:
snr = data["snr"]
if snr == 0:
s = "SNR: N/A | {:.0f} dB".format(10*np.log10(data["lambda_p"]))
else:
fmt = "SNR: {:.0f} | {:.0f} dB"
s = fmt.format(10*np.log10(snr), 10*np.log10(data["lambda_p"]))
self.spect_text_item.setText(s)
self.spect_text_item.setAnchor((0, 1))
self.spect_text_item.setPos(0, 0)
f_est = data["f_est"]
if f_est > 0:
s = "Latest frequency estimate: {:.2f} Hz | {:.0f} BPM".format(f_est, f_est*60)
self.fest_text_item.setText(s)
self.fest_plot.enableAutoRange(x=True)
self.spect_curve.setData(data["x_dft"], data["power_spectrum"])
self.spect_dft_inf_line.setValue(data["f_dft_est"])
self.spect_est_inf_line.setValue(data["f_est"])
self.spect_plot.setYRange(0, self.spect_smax.update(np.amax(data["power_spectrum"])))
self.fest_curve.setData(np.squeeze(data["f_est_hist"]))
self.fest_dft_curve.setData(np.squeeze(data["f_dft_est_hist"]))
if __name__ == "__main__":
main()
|
the-stack_0_12215 | """Authors: Cody Baker and Ben Dichter."""
from pathlib import Path
from datetime import datetime
from typing import Optional
import spikeextractors as se
from pynwb import NWBHDF5IO
from nwb_conversion_tools import NWBConverter, CEDRecordingInterface
from nwb_conversion_tools.utils.spike_interface import write_recording
from .cedstimulusinterface import CEDStimulusInterface
def quick_write(
ced_file_path: str,
session_description: str,
session_start: str,
save_path: str,
sorting: Optional[se.SortingExtractor] = None,
recording_lfp: Optional[se.RecordingExtractor] = None,
overwrite: bool = False,
):
"""Automatically extracts required session info from ced_file_path and writes NWBFile in spikeextractors."""
ced_file_path = Path(ced_file_path)
session_id = ced_file_path.stem
nwbfile_kwargs = dict(
session_description=session_description,
session_start_time=session_start.astimezone(),
session_id=session_id,
)
if sorting is not None:
se.NwbSortingExtractor.write_sorting(
sorting=sorting,
save_path=save_path,
overwrite=overwrite,
skip_properties=["mda_max_channel"],
skip_features=["waveforms"],
**nwbfile_kwargs
)
if recording_lfp is not None:
write_recording(recording=recording_lfp, save_path=save_path, write_as="lfp")
class CEDNWBConverter(NWBConverter):
data_interface_classes = dict(
CEDRecording=CEDRecordingInterface, CEDStimulus=CEDStimulusInterface
)
def __init__(self, source_data):
channel_info = self.data_interface_classes[
"CEDRecording"
].RX.get_all_channels_info(source_data["CEDRecording"]["file_path"])
rhd_channels = []
stim_channels = []
for ch, info in channel_info.items():
if "Rhd" in info["title"]:
rhd_channels.append(ch)
if info["title"] in ["CED_Mech", "MechTTL", "Laser"]:
stim_channels.append(ch)
source_data["CEDRecording"].update(smrx_channel_ids=rhd_channels)
source_data["CEDStimulus"].update(smrx_channel_ids=stim_channels)
super().__init__(source_data)
def get_metadata(self):
metadata = super().get_metadata()
smrx_file_path = Path(
self.data_interface_objects["CEDRecording"].source_data["file_path"]
)
session_id = smrx_file_path.stem
metadata["NWBFile"].update(
institution="EMBL - Heidelberg", lab="Mease", session_id=session_id
)
return metadata
|
the-stack_0_12216 | import numpy as np
import math
import torch.nn as nn
from .utils import unetConv2, unetUp, conv2DBatchNormRelu, conv2DBatchNorm
import torch
import torch.nn.functional as F
from models.layers.grid_attention_layer import GridAttentionBlock2D_TORR as AttentionBlock2D
from models.networks_other import init_weights
class sononet_grid_attention(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, in_channels=3, is_batchnorm=True, n_convs=None,
nonlocal_mode='concatenation', aggregation_mode='concat'):
super(sononet_grid_attention, self).__init__()
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
self.n_classes= n_classes
self.aggregation_mode = aggregation_mode
self.deep_supervised = True
if n_convs is None:
n_convs = [3, 3, 3, 2, 2]
filters = [64, 128, 256, 512]
filters = [int(x / self.feature_scale) for x in filters]
####################
# Feature Extraction
self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm, n=n_convs[0])
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm, n=n_convs[1])
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm, n=n_convs[2])
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm, n=n_convs[3])
self.maxpool4 = nn.MaxPool2d(kernel_size=2)
self.conv5 = unetConv2(filters[3], filters[3], self.is_batchnorm, n=n_convs[4])
################
# Attention Maps
self.compatibility_score1 = AttentionBlock2D(in_channels=filters[2], gating_channels=filters[3],
inter_channels=filters[3], sub_sample_factor=(1,1),
mode=nonlocal_mode, use_W=False, use_phi=True,
use_theta=True, use_psi=True, nonlinearity1='relu')
self.compatibility_score2 = AttentionBlock2D(in_channels=filters[3], gating_channels=filters[3],
inter_channels=filters[3], sub_sample_factor=(1,1),
mode=nonlocal_mode, use_W=False, use_phi=True,
use_theta=True, use_psi=True, nonlinearity1='relu')
#########################
        # Aggregation Strategies
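        # Supported aggregation_mode values (as implemented below):
        #   'concat'   - concatenate the attended vectors and the pooled features, classify once
        #   'mean'     - one classifier per attended vector; returns the separate predictions
        #   'deep_sup' - deep supervision: the concatenated prediction plus the separate ones
        #   'ft'       - classify separately, then feed the stacked predictions to a final classifier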
self.attention_filter_sizes = [filters[2], filters[3]]
if aggregation_mode == 'concat':
self.classifier = nn.Linear(filters[2]+filters[3]+filters[3], n_classes)
            self.aggregate = self.aggregation_concat
else:
self.classifier1 = nn.Linear(filters[2], n_classes)
self.classifier2 = nn.Linear(filters[3], n_classes)
self.classifier3 = nn.Linear(filters[3], n_classes)
self.classifiers = [self.classifier1, self.classifier2, self.classifier3]
if aggregation_mode == 'mean':
self.aggregate = self.aggregation_sep
elif aggregation_mode == 'deep_sup':
self.classifier = nn.Linear(filters[2] + filters[3] + filters[3], n_classes)
self.aggregate = self.aggregation_ds
elif aggregation_mode == 'ft':
self.classifier = nn.Linear(n_classes*3, n_classes)
self.aggregate = self.aggregation_ft
else:
raise NotImplementedError
####################
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
init_weights(m, init_type='kaiming')
elif isinstance(m, nn.BatchNorm2d):
init_weights(m, init_type='kaiming')
def aggregation_sep(self, *attended_maps):
return [ clf(att) for clf, att in zip(self.classifiers, attended_maps) ]
def aggregation_ft(self, *attended_maps):
preds = self.aggregation_sep(*attended_maps)
return self.classifier(torch.cat(preds, dim=1))
def aggregation_ds(self, *attended_maps):
preds_sep = self.aggregation_sep(*attended_maps)
pred = self.aggregation_concat(*attended_maps)
return [pred] + preds_sep
def aggregation_concat(self, *attended_maps):
return self.classifier(torch.cat(attended_maps, dim=1))
def forward(self, inputs):
# Feature Extraction
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
conv5 = self.conv5(maxpool4)
batch_size = inputs.shape[0]
pooled = F.adaptive_avg_pool2d(conv5, (1, 1)).view(batch_size, -1)
# Attention Mechanism
g_conv1, att1 = self.compatibility_score1(conv3, conv5)
g_conv2, att2 = self.compatibility_score2(conv4, conv5)
# flatten to get single feature vector
fsizes = self.attention_filter_sizes
g1 = torch.sum(g_conv1.view(batch_size, fsizes[0], -1), dim=-1)
g2 = torch.sum(g_conv2.view(batch_size, fsizes[1], -1), dim=-1)
return self.aggregate(g1, g2, pooled)
@staticmethod
def apply_argmax_softmax(pred):
log_p = F.softmax(pred, dim=1)
return log_p
|
the-stack_0_12218 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libbase",
sources=[
"fairseq/clib/libbase/balanced_assignment.cpp",
],
)
]
)
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
)
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
|
the-stack_0_12219 | # Copyright (c) 2019 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import ast
import builtins # To check against functions that are built-in in Python.
import math # Imported here so it can be used easily by the setting functions.
import uuid # Imported here so it can be used easily by the setting functions.
import base64 # Imported here so it can be used easily by the setting functions.
import hashlib # Imported here so it can be used easily by the setting functions.
from types import CodeType
from typing import Any, Callable, Dict, FrozenSet, NamedTuple, Optional, Set, TYPE_CHECKING
from UM.Settings.Interfaces import ContainerInterface
from UM.Settings.PropertyEvaluationContext import PropertyEvaluationContext
from UM.Logger import Logger
if TYPE_CHECKING:
from typing import FrozenSet
class IllegalMethodError(Exception):
pass
def _debug_value(value: Any) -> Any:
Logger.log("d", "Setting Function: %s", value)
return value
#
# This class is used to evaluate Python codes (or you can call them formulas) for a setting's property. If a setting's
# property is a static type, e.g., a string, an int, a float, etc., its value will just be interpreted as it is, but
# when it's a Python code (formula), the value needs to be evaluated via this class.
#
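# Illustrative usage (the setting name below is hypothetical, not taken from any profile):
# SettingFunction("wall_line_width * 2") parses the expression once, records that it
# depends on the key "wall_line_width", and is later evaluated against a setting
# container via __call__(value_provider).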
class SettingFunction:
## Constructor.
#
# \param code The Python code this function should evaluate.
def __init__(self, expression: str) -> None:
super().__init__()
self._code = expression
# Keys of all settings that are referenced to in this function.
self._used_keys = frozenset() # type: FrozenSet[str]
self._used_values = frozenset() # type: FrozenSet[str]
self._compiled = None # type: Optional[CodeType] #Actually an Optional['code'] object, but Python doesn't properly expose this 'code' object via any library.
self._valid = False # type: bool
try:
tree = ast.parse(self._code, "eval")
result = _SettingExpressionVisitor().visit(tree)
self._used_keys = frozenset(result.keys)
self._used_values = frozenset(result.values)
self._compiled = compile(self._code, repr(self), "eval")
self._valid = True
except (SyntaxError, TypeError) as e:
Logger.log("e", "Parse error in function ({1}) for setting: {0}".format(str(e), self._code))
except IllegalMethodError as e:
Logger.log("e", "Use of illegal method {0} in function ({1}) for setting".format(str(e), self._code))
except Exception as e:
Logger.log("e", "Exception in function ({0}) for setting: {1}".format(str(e), self._code))
## Call the actual function to calculate the value.
#
# \param value_provider The container from which to get setting values in
# the formula.
def __call__(self, value_provider: ContainerInterface, context: Optional[PropertyEvaluationContext] = None) -> Any:
if not value_provider:
return None
if not self._valid:
return None
locals = {} # type: Dict[str, Any]
# if there is a context, evaluate the values from the perspective of the original caller
if context is not None:
value_provider = context.rootStack()
for name in self._used_values:
value = value_provider.getProperty(name, "value", context)
if value is None:
continue
locals[name] = value
g = {} # type: Dict[str, Any]
g.update(globals())
g.update(self.__operators)
# override operators if there is any in the context
if context is not None:
g.update(context.context.get("override_operators", {}))
try:
if self._compiled:
return eval(self._compiled, g, locals)
Logger.log("e", "An error ocurred evaluating the function {0}.".format(self))
return 0
except Exception as e:
Logger.logException("d", "An exception occurred in inherit function {0}: {1}".format(self, str(e)))
return 0 # Settings may be used in calculations and they need a value
def __eq__(self, other: object) -> bool:
if not isinstance(other, SettingFunction):
return False
return self._code == other._code
def __hash__(self) -> int:
return hash(self._code)
## Returns whether the function is ready to be executed.
#
# \return True if the function is valid, or False if it's not.
def isValid(self) -> bool:
return self._valid
## Retrieve a set of the keys (strings) of all the settings used in this function.
#
# \return A set of the keys (strings) of all the settings used in this functions.
def getUsedSettingKeys(self) -> FrozenSet[str]:
return self._used_keys
def __str__(self) -> str:
return "={0}".format(self._code)
def __repr__(self) -> str:
return "<UM.Settings.SettingFunction (0x{0:x}) ={1} >".format(id(self), self._code)
## To support Pickle
#
# Pickle does not support the compiled code, so instead remove it from the state.
# We can re-compile it later on anyway.
def __getstate__(self) -> Dict[str, Any]:
state = self.__dict__.copy()
del state["_compiled"]
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
self.__dict__.update(state)
self._compiled = compile(self._code, repr(self), "eval")
## Expose a custom function to the code executed by SettingFunction
#
# \param name What identifier to use in the executed code.
# \param operator A callable that implements the actual logic to execute.
@classmethod
def registerOperator(cls, name: str, operator: Callable) -> None:
cls.__operators[name] = operator
_SettingExpressionVisitor._knownNames.add(name)
__operators = {
"debug": _debug_value
}
_VisitResult = NamedTuple("_VisitResult", [("values", Set[str]), ("keys", Set[str])])
# Helper class used to analyze a parsed function.
#
# It walks a Python AST generated from a Python expression. It will analyze the AST and
# produce two sets, one set of "used keys" and one set of "used values". "used keys" are
# setting keys (strings) that are used by the expression, whereas "used values" are
# actual variable references that are needed for the function to be executed.
class _SettingExpressionVisitor(ast.NodeVisitor):
def __init__(self) -> None:
super().__init__()
self.values = set() # type: Set[str]
self.keys = set() # type: Set[str]
def visit(self, node: ast.AST) -> _VisitResult:
super().visit(node)
return _VisitResult(values = self.values, keys = self.keys)
def visit_Name(self, node: ast.Name) -> None: # [CodeStyle: ast.NodeVisitor requires this function name]
if node.id in self._blacklist:
raise IllegalMethodError(node.id)
if node.id not in self._knownNames and node.id not in dir(builtins):
self.values.add(node.id)
self.keys.add(node.id)
## This one is used before Python 3.8 to visit string types.
#
# visit_Str will be marked as deprecated from Python 3.8 and onwards.
def visit_Str(self, node: ast.AST) -> None:
if node.s not in self._knownNames and node.s not in dir(builtins): # type: ignore #AST uses getattr stuff, so ignore type of node.s.
self.keys.add(node.s) # type: ignore
## This one is used on Python 3.8+ to visit string types.
def visit_Constant(self, node: ast.AST) -> None:
if isinstance(node.value, str) and node.value not in self._knownNames and node.value not in dir(builtins): # type: ignore #AST uses getattr stuff, so ignore type of node.value.
self.keys.add(node.value) # type: ignore
_knownNames = {
"math",
"max",
"min",
"debug",
"sum",
"len",
"uuid",
"hashlib",
"base64"
} # type: Set[str]
_blacklist = {
"sys",
"os",
"import",
"__import__",
"eval",
"exec",
"subprocess",
} # type: Set[str]
|
the-stack_0_12220 | import json
import unittest
from linkml.generators.jsonschemagen import JsonSchemaGenerator
from tests.utils.test_environment import TestEnvironmentTestCase
from tests.test_issues.environment import env
# reported in https://github.com/linkml/linkml/issues/726
schema_str = """
id: http://example.org
name: issue-726
imports:
- https://w3id.org/linkml/types
prefixes:
x: http://example.org/
default_prefix: x
default_range: string
description: test
classes:
C:
tree_root: true
slots:
- s1
- s2
- s3
- s4
slot_usage:
s1:
equals_string: foo
s3:
equals_number: 32
D:
slots:
- s1
- s2
- s3
- s4
slots:
s1:
description: test slot that can be overridden with specific values
s2:
equals_string: bar
s3:
description: test override for equals_number
range: integer
s4:
equals_number: 7
range: integer
"""
class Issue726ConstCase(TestEnvironmentTestCase):
env = env
def test_jsonschema(self):
gen = JsonSchemaGenerator(schema_str)
output = gen.serialize()
print(output)
js = json.loads(output)
top_props = js['properties']
s1C = top_props['s1']
s2C = top_props['s2']
s3C = top_props['s3']
s4C = top_props['s4']
D = js['$defs']['D']['properties']
s1D = D['s1']
s2D = D['s2']
s3D = D['s3']
s4D = D['s4']
self.assertEqual(s1C['const'], 'foo')
self.assertEqual(s2C['const'], 'bar')
self.assertNotIn('const', s1D)
self.assertEqual(s2D['const'], 'bar')
self.assertEqual(s3C['const'], 32)
self.assertEqual(s4C['const'], 7)
self.assertNotIn('const', s3D)
self.assertEqual(s4D['const'], 7)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12225 | cancerlist = ["PANCANCER"]
input_file1 = []
output_file1 = []
threshold = 0.2
probe_count = 485577
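# For each cancer type, keep only the CpG sites whose p-value (third data column after
# the site ID) is <= 0.05; probes marked "NoSamples" or with no data are skipped.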
for i in range(0, len(cancerlist)) :
input_file1.append(open(str(threshold) + ".Cutoff.FC.Pvalue." + cancerlist[i] + ".txt", 'r'))
output_file1.append(open(str(threshold) + ".MeaningfulCpGsitesByPvalue0.05.Without.Extreme." + cancerlist[i] + ".txt", 'w'))
input_file1[i].readline()
for j in range(0, probe_count) :
line1 = input_file1[i].readline().split()
site_id = line1.pop(0)
if(line1[0] == "NoSamples" or len(line1) == 1) : continue
if(float(line1[2]) > 0.05) : continue
printline = site_id
for k in range(0, len(line1)) :
printline += "\t" + line1[k]
printline += "\n"
output_file1[i].write(printline)
if(j % 10000 == 0) : print(cancerlist[i] + " %d completed." % j)
|
the-stack_0_12231 | import json
import requests
import sys
import time
from argparse import ArgumentParser
from collections import deque
from os.path import isfile
from tabber import Tabber
def _argparse():
arg_parse = ArgumentParser(description="Crawl last.fm for finnish users, given a seed person or a reference to a "
"file containing one seed name per line. Either a seed name or a seed file "
"is required")
arg_parse.add_argument("api-key", type=str,
help="last.fm api key to use for crawling.")
arg_parse.add_argument("-n", "--name", type=str, default=None,
help="Seed name for crawling names")
arg_parse.add_argument("-i", "--input", type=str, default=None,
help="Seed file for crawling. One name per line.")
arg_parse.add_argument("-o", "--output", type=str, default="fi_names.txt",
help="Output file for the names. One name per line.")
return arg_parse
class User:
def __init__(self, api_key, user_name=None, password=None):
self.api_key = api_key
self.user_name = user_name
self.password = password
class Connection:
def __init__(self, user, base_url):
self.user = user
self.base_url = base_url
self.base_time = time.time()
def get(self, payload):
payload["api_key"] = self.user.api_key
payload["format"] = "json"
if self.base_time + 1 > time.time():
time.sleep(1)
self.base_time = time.time()
r = requests.get(self.base_url, params=payload)
sys.stderr.write("{}: Retrieved {}\n".format(r.status_code, r.url))
sys.stderr.flush()
if r.status_code == 200:
return json.loads(r.text)
return None
def get_user_info(conn : Connection, user_name):
return conn.get({"method": "user.getinfo", "user": user_name})
def get_user_friends(conn : Connection, user_name):
fp = conn.get({"method": "user.getfriends", "user": user_name})
if not fp:
return None
pc = int(fp["friends"]["@attr"]["totalPages"])
fl = fp["friends"]["user"]
if pc < 2:
return fl
for i in range(2, pc + 1):
fp = conn.get({"method": "user.getfriends", "user": user_name, "page": str(i)})
if fp:
fl.extend(fp["friends"]["user"])
return fl
def main(conn, entry_points):
fil = len(entry_points)
nfil = 0
people = 0
with Tabber("retrieved lists", "finns", "other") as tabb, open("fi_names.txt", 'w') as out_file:
out_file.write("".join(["{}\n".format(n) for n in entry_points]))
        found = set(entry_points)
uq = deque()
uq.extend(entry_points)
while len(uq) > 0 and fil < 100000:
un = uq.popleft()
fl = get_user_friends(conn, un)
people += 1
if not fl:
continue
for fr in fl:
if "country" in fr and fr["country"] == "Finland" and "name" in fr and fr["name"] not in found:
out_file.write("{}\n".format(fr["name"]))
fil += 1
uq.append(fr["name"])
found.add(fr["name"])
elif "name" in fr and fr["name"] not in found:
found.add(fr["name"])
nfil += 1
tabb(people, fil, nfil)
print("Found {} people with country == Finland".format(fil))
print("Found {} people where not country == Finland".format(nfil))
def read_names(seed_file):
with open(seed_file) as in_file:
return {e[:-1] for e in in_file.readlines()}
if __name__ == "__main__":
args = _argparse().parse_args()
assert args.name or args.input, "either seed file or seed string needs to be supplied"
seed = [args.name]
if args.input and isfile(args.input):
seed = read_names(args.input)
main(Connection(User(sys.argv[1]), "http://ws.audioscrobbler.com/2.0/"), seed)
|
the-stack_0_12234 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import json
import os
import tempfile
import tarfile
import sys
from astropy.extern import six
from astropy.io import fits
from astropy import log
import astropy.units
import astropy.io.votable as votable
from ..query import BaseQuery
from ..utils import commons
from ..utils import async_to_sync
from . import conf
from ..exceptions import TableParseError
from .. import version
from astropy.coordinates.name_resolve import sesame_database
@async_to_sync
class ESASkyClass(BaseQuery):
URLbase = conf.urlBase
TIMEOUT = conf.timeout
DEFAULT_ROW_LIMIT = conf.row_limit
__FITS_STRING = ".fits"
__FTZ_STRING = ".FTZ"
__TAR_STRING = ".tar"
__ALL_STRING = "all"
__CATALOGS_STRING = "catalogs"
__OBSERVATIONS_STRING = "observations"
__MISSION_STRING = "mission"
__TAP_TABLE_STRING = "tapTable"
__TAP_NAME_STRING = "tapName"
__LABEL_STRING = "label"
__METADATA_STRING = "metadata"
__PRODUCT_URL_STRING = "product_url"
__SOURCE_LIMIT_STRING = "sourceLimit"
__POLYGON_NAME_STRING = "polygonNameTapColumn"
__POLYGON_RA_STRING = "polygonRaTapColumn"
__POLYGON_DEC_STRING = "polygonDecTapColumn"
__POS_TAP_STRING = "posTapColumn"
__ORDER_BY_STRING = "orderBy"
__IS_SURVEY_MISSION_STRING = "isSurveyMission"
__ZERO_ARCMIN_STRING = "0 arcmin"
__MIN_RADIUS_CATALOG_STRING = "5 arcsec"
__HERSCHEL_STRING = 'herschel'
__HST_STRING = 'hst'
__INTEGRAL_STRING = 'integral'
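    # Maps Herschel SPIRE (psw/pmw/plw) and PACS (blue/green/red) band identifiers to
    # their nominal wavelengths in micrometres.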
__HERSCHEL_FILTERS = {
'psw': '250',
'pmw': '350',
'plw': '500',
'mapb_blue': '70',
'mapb_green': '100',
'mapr_': '160'}
_MAPS_DOWNLOAD_DIR = "Maps"
_isTest = ""
def list_maps(self):
"""
Get a list of the mission names of the available observations in ESASky
"""
return self._json_object_field_to_list(
self._get_observation_json(), self.__MISSION_STRING)
def list_catalogs(self):
"""
Get a list of the mission names of the available catalogs in ESASky
"""
return self._json_object_field_to_list(
self._get_catalogs_json(), self.__MISSION_STRING)
def query_object_maps(self, position, missions=__ALL_STRING,
get_query_payload=False, cache=True):
"""
This method queries a chosen object or coordinate for all available maps
which have observation data on the chosen position. It returns a
TableList with all the found maps metadata for the chosen missions
and object.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata
and observations available for the chosen missions and object.
It is structured in a TableList like this:
TableList with 8 tables:
'0:HERSCHEL' with 8 column(s) and 25 row(s)
'1:HST' with 8 column(s) and 735 row(s)
Examples
--------
query_object_maps("m101", "all")
query_object_maps("265.05, 69.0", "Herschel")
query_object_maps("265.05, 69.0", ["Herschel", "HST"])
"""
return self.query_region_maps(position=position,
radius=self.__ZERO_ARCMIN_STRING,
missions=missions,
get_query_payload=get_query_payload,
cache=cache)
def query_object_catalogs(self, position, catalogs=__ALL_STRING,
row_limit=DEFAULT_ROW_LIMIT,
get_query_payload=False, cache=True):
"""
This method queries a chosen object or coordinate for all available
catalogs and returns a TableList with all the found catalogs metadata
for the chosen missions and object. To account for errors in telescope
position, the method will look for any sources within a radius of
5 arcsec of the chosen position.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
catalogs : string or list, optional
Can be either a specific catalog or a list of catalogs (all catalog
names are found in list_catalogs()) or 'all' to search in all
catalogs. Defaults to 'all'.
row_limit : int, optional
Determines how many rows that will be fetched from the database
for each mission. Can be -1 to select maximum (currently 100 000).
Defaults to 10000.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata
of the catalogs available for the chosen mission and object.
It is structured in a TableList like this:
TableList with 8 tables:
'0:Gaia DR1 TGA' with 8 column(s) and 25 row(s)
'1:HSC' with 8 column(s) and 75 row(s)
Examples
--------
query_object_catalogs("m101", "all")
query_object_catalogs("265.05, 69.0", "Gaia DR1 TGA")
query_object_catalogs("265.05, 69.0", ["Gaia DR1 TGA", "HSC"])
"""
return self.query_region_catalogs(position=position,
radius=self.__ZERO_ARCMIN_STRING,
catalogs=catalogs,
row_limit=row_limit,
get_query_payload=get_query_payload,
cache=cache)
def query_region_maps(self, position, radius, missions=__ALL_STRING,
get_query_payload=False, cache=True):
"""
This method queries a chosen region for all available maps and returns a
TableList with all the found maps metadata for the chosen missions and
region.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
radius : str or `~astropy.units.Quantity`
The radius of a region.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata
and observations available for the chosen missions and region.
It is structured in a TableList like this:
TableList with 8 tables:
'0:HERSCHEL' with 8 column(s) and 25 row(s)
'1:HST' with 8 column(s) and 735 row(s)
Examples
--------
query_region_maps("m101", "14'", "all")
import astropy.units as u
query_region_maps("265.05, 69.0", 14*u.arcmin, "Herschel")
query_region_maps("265.05, 69.0", ["Herschel", "HST"])
"""
sanitized_position = self._sanitize_input_position(position)
sanitized_radius = self._sanitize_input_radius(radius)
sanitized_missions = self._sanitize_input_mission(missions)
query_result = {}
sesame_database.set('simbad')
coordinates = commons.parse_coordinates(sanitized_position)
self._store_query_result_maps(query_result, sanitized_missions,
coordinates, sanitized_radius,
get_query_payload, cache)
if (get_query_payload):
return query_result
return commons.TableList(query_result)
def query_region_catalogs(self, position, radius, catalogs=__ALL_STRING,
row_limit=DEFAULT_ROW_LIMIT,
get_query_payload=False, cache=True):
"""
This method queries a chosen region for all available catalogs and
returns a TableList with all the found catalogs metadata for the chosen
missions and region.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
radius : str or `~astropy.units.Quantity`
The radius of a region.
catalogs : string or list, optional
Can be either a specific catalog or a list of catalogs (all catalog
names are found in list_catalogs()) or 'all' to search in all
catalogs. Defaults to 'all'.
row_limit : int, optional
Determines how many rows that will be fetched from the database
for each mission. Can be -1 to select maximum (currently 100 000).
Defaults to 10000.
get_query_payload : bool, optional
When set to True the method returns the HTTP request parameters.
Defaults to False.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
table_list : `~astroquery.utils.TableList`
Each mission returns a `~astropy.table.Table` with the metadata of
the catalogs available for the chosen mission and region.
It is structured in a TableList like this:
TableList with 8 tables:
'0:Gaia DR1 TGA' with 8 column(s) and 25 row(s)
'1:HSC' with 8 column(s) and 75 row(s)
Examples
--------
query_region_catalogs("m101", "14'", "all")
import astropy.units as u
query_region_catalogs("265.05, 69.0", 14*u.arcmin, "Gaia DR1 TGA")
query_region_catalogs("265.05, 69.0", 14*u.arcmin, ["Gaia DR1 TGA", "HSC"])
"""
sanitized_position = self._sanitize_input_position(position)
sanitized_radius = self._sanitize_input_radius(radius)
sanitized_catalogs = self._sanitize_input_catalogs(catalogs)
sanitized_row_limit = self._sanitize_input_row_limit(row_limit)
sesame_database.set('simbad')
coordinates = commons.parse_coordinates(sanitized_position)
query_result = {}
self._store_query_result_catalogs(query_result, sanitized_catalogs,
coordinates, sanitized_radius,
sanitized_row_limit,
get_query_payload, cache)
if (get_query_payload):
return query_result
return commons.TableList(query_result)
def get_maps(self, query_table_list, missions=__ALL_STRING,
download_dir=_MAPS_DOWNLOAD_DIR, cache=True):
"""
This method takes the dictionary of missions and metadata as returned by
query_region_maps and downloads all maps to the selected folder.
The method returns a dictionary which is divided by mission.
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
Parameters
----------
query_table_list : `~astroquery.utils.TableList`
A TableList with all the missions wanted and their respective
metadata. Usually the return value of query_region_maps.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
download_dir : string, optional
The folder where all downloaded maps should be stored.
Defaults to a folder called 'Maps' in the current working directory.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
maps : `dict`
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
It is structured in a dictionary like this:
dict: {
'HERSCHEL': [{'70': [HDUList], '160': [HDUList]}, {'70': [HDUList], '160': [HDUList]}, ...],
'HST':[[HDUList], [HDUList], [HDUList], [HDUList], [HDUList], ...],
'XMM-EPIC' : [[HDUList], [HDUList], [HDUList], [HDUList], ...]
...
}
Examples
--------
get_maps(query_region_catalogs("m101", "14'", "all"))
"""
sanitized_query_table_list = self._sanitize_input_table_list(query_table_list)
sanitized_missions = self._sanitize_input_mission(missions)
maps = dict()
for query_mission in sanitized_query_table_list.keys():
for mission in sanitized_missions:
# INTEGRAL does not have a product url yet.
if (query_mission.lower() == self.__INTEGRAL_STRING):
print("INTEGRAL does not yet support downloading of "
"fits files")
break
if (query_mission.lower() == mission.lower()):
maps[query_mission] = (
self._get_maps_for_mission(
sanitized_query_table_list[query_mission],
query_mission,
download_dir,
cache))
break
if (len(sanitized_query_table_list) > 0):
log.info("Maps available at %s" % os.path.abspath(download_dir))
else:
print("No maps found")
return maps
def get_images(self, position, radius=__ZERO_ARCMIN_STRING, missions=__ALL_STRING,
download_dir=_MAPS_DOWNLOAD_DIR, cache=True):
"""
This method gets the fits files available for the selected position and
mission and downloads all maps to the the selected folder.
The method returns a dictionary which is divided by mission.
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
Parameters
----------
position : str or `astropy.coordinates` object
Can either be a string of the location, eg 'M51', or the coordinates
of the object.
radius : str or `~astropy.units.Quantity`, optional
The radius of a region. Defaults to 0.
missions : string or list, optional
Can be either a specific mission or a list of missions (all mission
names are found in list_missions()) or 'all' to search in all
missions. Defaults to 'all'.
download_dir : string, optional
The folder where all downloaded maps should be stored.
Defaults to a folder called 'Maps' in the current working directory.
cache : bool, optional
When set to True the method will use a cache located at
.astropy/astroquery/cache. Defaults to True.
Returns
-------
maps : `dict`
All mission except Herschel returns a list of HDULists.
For Herschel each item in the list is a dictionary where the used
filter is the key and the HDUList is the value.
It is structured in a dictionary like this:
dict: {
'HERSCHEL': [{'70': [HDUList], '160': [HDUList]}, {'70': [HDUList], '160': [HDUList]}, ...],
'HST':[[HDUList], [HDUList], [HDUList], [HDUList], [HDUList], ...],
'XMM-EPIC' : [[HDUList], [HDUList], [HDUList], [HDUList], ...]
...
}
Examples
--------
get_images("m101", "14'", "all")
"""
sanitized_position = self._sanitize_input_position(position)
sanitized_radius = self._sanitize_input_radius(radius)
sanitized_missions = self._sanitize_input_mission(missions)
maps = dict()
map_query_result = self.query_region_maps(sanitized_position,
sanitized_radius,
sanitized_missions,
get_query_payload=False,
cache=cache)
for query_mission in map_query_result.keys():
# INTEGRAL does not have a product url yet.
if (query_mission.lower() == self.__INTEGRAL_STRING):
print("INTEGRAL does not yet support downloading of "
"fits files")
continue
maps[query_mission] = (
self._get_maps_for_mission(
map_query_result[query_mission],
query_mission,
download_dir,
cache))
print("Maps available at %s" % os.path.abspath(download_dir))
return maps
def _sanitize_input_position(self, position):
if (isinstance(position, str) or isinstance(position,
commons.CoordClasses)):
return position
else:
raise ValueError("Position must be either a string or "
"astropy.coordinates")
def _sanitize_input_radius(self, radius):
if (isinstance(radius, str) or isinstance(radius,
astropy.units.Quantity)):
return radius
else:
raise ValueError("Radius must be either a string or "
"astropy.units.Quantity")
def _sanitize_input_mission(self, missions):
if (isinstance(missions, list)):
return missions
if (isinstance(missions, str)):
if (missions.lower() == self.__ALL_STRING):
return self.list_maps()
else:
return [missions]
raise ValueError("Mission must be either a string or a list of "
"missions")
def _sanitize_input_catalogs(self, catalogs):
if (isinstance(catalogs, list)):
return catalogs
if (isinstance(catalogs, str)):
if (catalogs.lower() == self.__ALL_STRING):
return self.list_catalogs()
else:
return [catalogs]
raise ValueError("Catalog must be either a string or a list of "
"catalogs")
def _sanitize_input_table_list(self, table_list):
if (isinstance(table_list, commons.TableList)):
return table_list
raise ValueError("Query_table_list must be an astropy.utils.TableList")
def _sanitize_input_row_limit(self, row_limit):
if (isinstance(row_limit, int)):
return row_limit
raise ValueError("Row_limit must be an integer")
def _get_maps_for_mission(self, maps_table, mission, download_dir, cache):
maps = []
if (len(maps_table[self.__PRODUCT_URL_STRING]) > 0):
mission_directory = self._create_mission_directory(mission,
download_dir)
print("Starting download of %s data. (%d files)"
% (mission, len(maps_table[self.__PRODUCT_URL_STRING])))
for index in range(len(maps_table)):
product_url = maps_table[self.__PRODUCT_URL_STRING][index].decode('utf-8')
if(mission.lower() == self.__HERSCHEL_STRING):
observation_id = maps_table["observation_id"][index].decode('utf-8')
else:
observation_id = (maps_table[self._get_tap_observation_id(mission)][index]
.decode('utf-8'))
print("Downloading Observation ID: %s from %s"
% (observation_id, product_url), end=" ")
sys.stdout.flush()
directory_path = mission_directory + "/"
if (mission.lower() == self.__HERSCHEL_STRING):
maps.append(self._get_herschel_map(
product_url,
directory_path,
cache))
else:
response = self._request(
'GET',
product_url,
cache=cache,
headers=self._get_header())
file_name = ""
if (product_url.endswith(self.__FITS_STRING)):
file_name = (directory_path +
self._extract_file_name_from_url(product_url))
else:
file_name = (directory_path +
self._extract_file_name_from_response_header(response.headers))
fits_data = response.content
with open(file_name, 'wb') as fits_file:
fits_file.write(fits_data)
maps.append(fits.open(file_name))
print("[Done]")
print("Downloading of %s data complete." % mission)
return maps
def _get_herschel_map(self, product_url, directory_path, cache):
observation = dict()
tar_file = tempfile.NamedTemporaryFile(delete=False)
response = self._request(
'GET',
product_url,
cache=cache,
headers=self._get_header())
tar_file.write(response.content)
tar_file.close()
with tarfile.open(tar_file.name, 'r') as tar:
i = 0
for member in tar.getmembers():
member_name = member.name.lower()
if ('hspire' in member_name or 'hpacs' in member_name):
herschel_filter = self._get_herschel_filter_name(member_name)
tar.extract(member, directory_path)
observation[herschel_filter] = fits.open(
directory_path +
member.name)
i += 1
os.remove(tar_file.name)
return observation
def _get_herschel_filter_name(self, member_name):
for herschel_filter in self.__HERSCHEL_FILTERS.keys():
if herschel_filter in member_name:
return self.__HERSCHEL_FILTERS[herschel_filter]
def _remove_extra_herschel_directory(self, file_and_directory_name,
directory_path):
full_directory_path = os.path.abspath(directory_path)
file_name = file_and_directory_name[file_and_directory_name.index("/") + 1:]
os.renames(os.path.join(full_directory_path, file_and_directory_name),
os.path.join(full_directory_path, file_name))
return file_name
def _create_mission_directory(self, mission, download_dir):
if (download_dir == self._MAPS_DOWNLOAD_DIR):
mission_directory = self._MAPS_DOWNLOAD_DIR + "/" + mission
else:
mission_directory = (download_dir + "/" + self._MAPS_DOWNLOAD_DIR +
"/" + mission)
if not os.path.exists(mission_directory):
os.makedirs(mission_directory)
return mission_directory
def _extract_file_name_from_response_header(self, headers):
content_disposition = headers.get('Content-Disposition')
filename_string = "filename="
start_index = (content_disposition.index(filename_string) +
len(filename_string))
if (content_disposition[start_index] == '\"'):
start_index += 1
if (self.__FITS_STRING in content_disposition[start_index:]):
end_index = (
content_disposition.index(self.__FITS_STRING, start_index + 1) +
len(self.__FITS_STRING))
return content_disposition[start_index: end_index]
elif (self.__FTZ_STRING in content_disposition[start_index:]):
end_index = (
content_disposition.index(self.__FTZ_STRING, start_index + 1) +
len(self.__FTZ_STRING))
return content_disposition[start_index: end_index]
elif (self.__TAR_STRING in content_disposition[start_index:]):
end_index = (
content_disposition.index(self.__TAR_STRING, start_index + 1) +
len(self.__TAR_STRING))
return content_disposition[start_index: end_index]
else:
raise ValueError("Could not find file name in header. "
"Content disposition: %s." % content_disposition)
def _extract_file_name_from_url(self, product_url):
start_index = product_url.rindex("/") + 1
return product_url[start_index:]
def _query_region_maps(self, coordinates, radius, observation_name,
get_query_payload, cache):
observation_tap_name = (
self._find_observation_tap_table_name(observation_name))
query = (
self._build_observation_query(coordinates, radius,
self._find_observation_parameters(observation_tap_name)))
request_payload = self._create_request_payload(query)
if (get_query_payload):
return request_payload
return self._get_and_parse_from_tap(request_payload, cache)
def _query_region_catalog(self, coordinates, radius, catalog_name, row_limit,
get_query_payload, cache):
catalog_tap_name = self._find_catalog_tap_table_name(catalog_name)
query = self._build_catalog_query(coordinates, radius, row_limit,
self._find_catalog_parameters(catalog_tap_name))
request_payload = self._create_request_payload(query)
if (get_query_payload):
return request_payload
return self._get_and_parse_from_tap(request_payload, cache)
def _build_observation_query(self, coordinates, radius, json):
raHours, dec = commons.coord_to_radec(coordinates)
ra = raHours * 15.0 # Converts to degrees
radiusDeg = commons.radius_to_unit(radius, unit='deg')
select_query = "SELECT DISTINCT "
metadata = json[self.__METADATA_STRING]
metadata_tap_names = ", ".join(["%s" % entry[self.__TAP_NAME_STRING]
for entry in metadata])
from_query = " FROM %s" % json[self.__TAP_TABLE_STRING]
if (radiusDeg != 0 or json[self.__IS_SURVEY_MISSION_STRING]):
if (json[self.__IS_SURVEY_MISSION_STRING]):
where_query = (" WHERE 1=CONTAINS(pos, CIRCLE('ICRS', %f, %f, %f));"
% (ra, dec, radiusDeg))
else:
where_query = (" WHERE 1=INTERSECTS(CIRCLE('ICRS', %f, %f, %f), fov);"
% (ra, dec, radiusDeg))
else:
where_query = (" WHERE 1=CONTAINS(POINT('ICRS', %f, %f), fov);"
% (ra, dec))
query = "".join([
select_query,
metadata_tap_names,
from_query,
where_query])
return query
def _build_catalog_query(self, coordinates, radius, row_limit, json):
raHours, dec = commons.coord_to_radec(coordinates)
ra = raHours * 15.0 # Converts to degrees
radiusDeg = commons.radius_to_unit(radius, unit='deg')
select_query = "SELECT "
if(row_limit > 0):
select_query = "".join([select_query, "TOP %s " % row_limit])
elif(not row_limit == -1):
raise ValueError("Invalid value of row_limit")
metadata = json[self.__METADATA_STRING]
metadata_tap_names = ", ".join(["%s" % entry[self.__TAP_NAME_STRING]
for entry in metadata])
from_query = " FROM %s" % json[self.__TAP_TABLE_STRING]
if (radiusDeg == 0):
where_query = (" WHERE 1=CONTAINS(POINT('ICRS', ra, dec), CIRCLE('ICRS', %f, %f, %f))"
% (ra,
dec,
commons.radius_to_unit(
self.__MIN_RADIUS_CATALOG_STRING,
unit='deg')))
else:
where_query = (" WHERE 1=CONTAINS(POINT('ICRS', ra, dec), CIRCLE('ICRS', %f, %f, %f))"
% (ra, dec, radiusDeg))
order_by_query = " ORDER BY %s;" % json[self.__ORDER_BY_STRING]
query = "".join([select_query, metadata_tap_names, from_query,
where_query, order_by_query])
return query
def _store_query_result_maps(self, query_result, missions, coordinates,
radius, get_query_payload, cache):
for mission in missions:
mission_table = self._query_region_maps(coordinates, radius,
mission, get_query_payload,
cache)
if (len(mission_table) > 0):
query_result[mission.upper()] = mission_table
def _store_query_result_catalogs(self, query_result, catalogs, coordinates,
radius, row_limit, get_query_payload, cache):
for catalog in catalogs:
catalog_table = self._query_region_catalog(coordinates, radius,
catalog, row_limit,
get_query_payload, cache)
if (len(catalog_table) > 0):
query_result[catalog.upper()] = catalog_table
def _find_observation_parameters(self, mission_name):
return self._find_mission_parameters_in_json(mission_name,
self._get_observation_json())
def _find_catalog_parameters(self, catalog_name):
return self._find_mission_parameters_in_json(catalog_name,
self._get_catalogs_json())
def _find_mission_parameters_in_json(self, mission_tap_name, json):
for mission in json:
if (mission[self.__TAP_TABLE_STRING] == mission_tap_name):
return mission
raise ValueError("Input tap name %s not available." % mission_tap_name)
def _find_observation_tap_table_name(self, mission_name):
return self._find_mission_tap_table_name(
self._fetch_and_parse_json(self.__OBSERVATIONS_STRING),
mission_name)
def _find_catalog_tap_table_name(self, mission_name):
return self._find_mission_tap_table_name(
self._fetch_and_parse_json(self.__CATALOGS_STRING),
mission_name)
def _find_mission_tap_table_name(self, json, mission_name):
for index in range(len(json)):
if (json[index][self.__MISSION_STRING].lower() == mission_name.lower()):
return json[index][self.__TAP_TABLE_STRING]
raise ValueError("Input %s not available." % mission_name)
def _get_observation_json(self):
return self._fetch_and_parse_json(self.__OBSERVATIONS_STRING)
def _get_catalogs_json(self):
return self._fetch_and_parse_json(self.__CATALOGS_STRING)
def _fetch_and_parse_json(self, object_name):
url = self.URLbase + "/" + object_name
response = self._request(
'GET',
url,
cache=False,
headers=self._get_header())
string_response = response.content.decode('utf-8')
json_response = json.loads(string_response)
return json_response["descriptors"]
def _json_object_field_to_list(self, json, field_name):
response_list = []
for index in range(len(json)):
response_list.append(json[index][field_name])
return response_list
def _get_json_data_for_mission(self, json, mission):
for index in range(len(json)):
if(json[index][self.__MISSION_STRING].lower() == mission.lower()):
return json[index]
def _get_tap_observation_id(self, mission):
return self._get_json_data_for_mission(self._get_observation_json(), mission)["tapObservationId"]
def _create_request_payload(self, query):
return {'REQUEST': 'doQuery', 'LANG': 'ADQL', 'FORMAT': 'VOTABLE',
'QUERY': query}
def _get_and_parse_from_tap(self, request_payload, cache):
response = self._send_get_request("/tap/sync", request_payload, cache)
return self._parse_xml_table(response)
def _send_get_request(self, url_extension, request_payload, cache):
url = self.URLbase + url_extension
return self._request('GET',
url,
params=request_payload,
timeout=self.TIMEOUT,
cache=cache,
headers=self._get_header())
def _parse_xml_table(self, response):
# try to parse the result into an astropy.Table, else
# return the raw result with an informative error message.
try:
tf = six.BytesIO(response.content)
vo_table = votable.parse(tf, pedantic=False)
first_table = vo_table.get_first_table()
table = first_table.to_table(use_names_over_ids=True)
return table
except Exception as ex:
self.response = response
self.table_parse_error = ex
raise TableParseError(
"Failed to parse ESASky VOTABLE result! The raw response can be "
"found in self.response, and the error in "
"self.table_parse_error.")
def _get_header(self):
user_agent = 'astropy:astroquery.esasky.{vers} {isTest}'.format(
vers=version.version,
isTest=self._isTest)
return {'User-Agent': user_agent}
ESASky = ESASkyClass()
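# A minimal usage sketch (an assumption based on the docstrings above, left as a
# comment because it needs network access to the ESASky services):
#
#     maps = ESASky.get_images("m101", "14'", "all")
#     herschel_obs = maps["HERSCHEL"][0]   # dict of {filter: HDUList}
#     hst_hdulist = maps["HST"][0]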
|
the-stack_0_12235 | from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('task.apis.urls')), # for apis
    path('', include('task.forms.urls')), # for forms
path('auth/', include('rest_auth.urls')), # for login
path('accounts/', include('django.contrib.auth.urls'))
]
urlpatterns += staticfiles_urlpatterns()
|
the-stack_0_12236 | import sys
import struct
import collections
from . import filter_nan
from .ins401_field_parser import decode_value
from ...framework.utils.print import print_yellow
from ...framework.context import APP_CONTEXT
# input packet
error_decode_packet = 0
def _format_string(data_buffer):
parsed = bytearray(data_buffer) if data_buffer and len(
data_buffer) > 0 else None
formatted = ''
if parsed is not None:
try:
if sys.version_info < (3, 0):
formatted = str(struct.pack(
'{0}B'.format(len(parsed)), *parsed))
else:
formatted = str(struct.pack(
'{0}B'.format(len(parsed)), *parsed), 'utf-8')
except UnicodeDecodeError:
APP_CONTEXT.get_logger().logger.error('Parse data as string failed')
formatted = ''
return formatted
def string_parser(payload, user_configuration):
error = False
data = ''
data_str = _format_string(payload)
if data_str and (data_str.find('INS401') > -1) \
and (data_str.find('RTK_INS App') > -1) \
and (data_str.find('Bootloader') > -1):
data = data_str
else:
error = True
return data, error
def get_all_parameters_parser(payload, user_configuration):
'''
gA parser
'''
error = False
data = []
data_len = 0
for parameter in user_configuration:
param_id = parameter['paramId']
param_type = parameter['type']
name = parameter['name']
if param_type == 'uint8' or param_type == 'int8':
value = decode_value(
param_type, payload[data_len:data_len + 1])
data_len = data_len + 1
elif param_type == 'uint16' or param_type == 'int16':
value = decode_value(
param_type, payload[data_len:data_len + 2])
data_len = data_len + 2
elif param_type == 'uint32' or param_type == 'int32' or param_type == 'float':
value = decode_value(
param_type, payload[data_len:data_len + 4])
data_len = data_len + 4
elif param_type == 'uint64' or param_type == 'int64' or param_type == 'double':
value = decode_value(
param_type, payload[data_len:data_len + 8])
data_len = data_len + 8
elif param_type == 'ip4':
value = decode_value(
param_type, payload[data_len:data_len + 4])
data_len = data_len + 4
elif param_type == 'ip6':
value = decode_value(
param_type, payload[data_len:data_len + 6])
data_len = data_len + 6
elif 'char' in param_type:
ctype_n = param_type.replace('char', '')
ctype_l = int(ctype_n)
value = decode_value(
param_type, payload[data_len:data_len + ctype_l])
data_len = data_len + ctype_l
else:
print(
"no [{0}] when unpack_input_packet".format(param_type))
value = False
data.append(
{"paramId": param_id, "name": name, "value": value})
return data, error
def get_parameters_by_block_parser(payload, user_configuration):
'''
gB parser
'''
data = []
error = False
start_param_id = payload[0]
end_param_id = payload[1]
data_len = 2
for i in range(start_param_id, end_param_id+1, 1):
exist_param_conf = next((param_conf for param_conf in user_configuration
if param_conf['paramId'] == i), None)
if exist_param_conf:
param_type = exist_param_conf['type']
if param_type == 'uint8' or param_type == 'int8':
value = decode_value(
param_type, payload[data_len:data_len + 1])
data_len = data_len + 1
elif param_type == 'uint16' or param_type == 'int16':
value = decode_value(
param_type, payload[data_len:data_len + 2])
data_len = data_len + 2
elif param_type == 'uint32' or param_type == 'int32' or param_type == 'float':
value = decode_value(
param_type, payload[data_len:data_len + 4], exist_param_conf)
data_len = data_len + 4
elif param_type == 'uint64' or param_type == 'int64' or param_type == 'double':
value = decode_value(
param_type, payload[data_len:data_len + 8])
data_len = data_len + 8
elif param_type == 'ip4':
value = decode_value(
param_type, payload[data_len:data_len + 4])
data_len = data_len + 4
elif param_type == 'ip6':
value = decode_value(
param_type, payload[data_len:data_len + 6])
data_len = data_len + 6
elif 'char' in param_type:
ctype_n = param_type.replace('char', '')
ctype_l = int(ctype_n)
value = decode_value(
param_type, payload[data_len:data_len + ctype_l])
data_len = data_len + ctype_l
else:
print(
"no [{0}] when unpack_input_packet".format(param_type))
value = False
data.append({
"paramId": i,
"name": exist_param_conf['name'],
"value": value
})
return data, error
def get_parameter_parser(payload, user_configuration):
'''
gP Parser
'''
data = None
error = False
param_id = decode_value('uint32', payload[0:4])
if param_id is not False:
param = filter(lambda item: item['paramId'] ==
param_id, user_configuration)
try:
first_item = next(iter(param), None)
param_value = decode_value(
first_item['type'], payload[4:12], first_item)
data = {"paramId": param_id,
"name": first_item['name'], "value": param_value}
except StopIteration:
error = True
except Exception:
error = True
else:
error = True
return data, error
def update_parameter_parser(payload, user_configuration):
'''
uP parser
'''
error = False
data = decode_value('int32', payload[0:4])
if data != 0:
error = True
return data, error
def update_parameters_parser(payload, user_configuration):
'''
uB parser
'''
error = False
data = decode_value('uint32', payload[0:4])
if data:
error = True
return data, error
def common_input_parser(payload, user_configuration):
'''
General input packet parser
'''
print('common_input_parser:', payload)
return payload, False
def read_eeprom_parser(payload, user_configuration=None):
return payload[3:], False
# output packet
def common_continuous_parser(payload, configuration):
'''
Unpack output packet
'''
if configuration is None:
return
data = None
is_list = 0
length = 0
pack_fmt = '<'
for value in configuration['payload']:
if value['type'] == 'float':
pack_fmt += 'f'
length += 4
elif value['type'] == 'uint32':
pack_fmt += 'I'
length += 4
elif value['type'] == 'int32':
pack_fmt += 'i'
length += 4
elif value['type'] == 'int16':
pack_fmt += 'h'
length += 2
elif value['type'] == 'uint16':
pack_fmt += 'H'
length += 2
elif value['type'] == 'double':
pack_fmt += 'd'
length += 8
elif value['type'] == 'int64':
pack_fmt += 'q'
length += 8
elif value['type'] == 'uint64':
pack_fmt += 'Q'
length += 8
elif value['type'] == 'char':
pack_fmt += 'c'
length += 1
elif value['type'] == 'uchar':
pack_fmt += 'B'
length += 1
elif value['type'] == 'uint8':
pack_fmt += 'B'
length += 1
len_fmt = '{0}B'.format(length)
has_list = configuration.__contains__('isList')
if has_list:
is_list = configuration['isList']
if is_list == 1:
packet_num = len(payload) // length
data = []
for i in range(packet_num):
payload_c = payload[i*length:(i+1)*length]
try:
pack_item = struct.pack(len_fmt, *payload_c)
item = struct.unpack(pack_fmt, pack_item)
out = [(value['name'], item[idx])
for idx, value in enumerate(configuration['payload'])]
item = collections.OrderedDict(out)
data.append(item)
except Exception as ex: # pylint: disable=broad-except
print(
"error happened when decode the payload, pls restart driver: {0}"
.format(ex))
else:
try:
pack_item = struct.pack(len_fmt, *payload)
data = struct.unpack(pack_fmt, pack_item)
out = [(
value['name'],
filter_nan(data[idx])
) for idx, value in enumerate(configuration['payload'])]
data = collections.OrderedDict(out)
except Exception as ex: # pylint: disable=broad-except
global error_decode_packet
error_decode_packet = error_decode_packet + 1
if error_decode_packet == 100 or error_decode_packet == 400 or error_decode_packet == 700:
print_yellow(
"warning: your firmware may not suitable for this driver, pls update firmware or driver")
if error_decode_packet % 300 == 0:
APP_CONTEXT.get_logger().logger.warning(
"error happened when decode the payload of packets, pls restart driver: {0}"
.format(ex))
return data
def other_output_parser(payload):
return payload
# packet handler
def match_command_handler(packet_type):
'''
Find the handler for specified packet
'''
parser_dict = {
b'\x01\xcc': string_parser,
b'\x02\xcc': get_parameter_parser,
b'\x03\xcc': update_parameter_parser,
b'\x04\xcc': update_parameter_parser,
b'\x01\x0b': common_input_parser,
b'\x02\x0b': common_input_parser
}
return parser_dict.get(packet_type)
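# A minimal usage sketch (the payload and user_configuration are hypothetical; the
# packet-type bytes follow the parser table above):
#
#     parser = match_command_handler(b'\x02\xcc')   # maps to get_parameter_parser
#     if parser is not None:
#         data, error = parser(payload, user_configuration)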
|
the-stack_0_12237 | import datetime
import json
import os
from dotenv import load_dotenv
from Santander.SantanderScrapper import SantanderScrapper
load_dotenv(verbose=True)
from Clear.ClearScrapper import ClearScrapper
from GuiaBolso.GuiaBolsoScrapper import GuiaBolsoScrapper
from Rico.RicoScrapper import RicoScrapper
from SmarttBot.SmarttBotScrapper import SmarttBotScrapper
def save_output(provider, data):
date = datetime.datetime.today().strftime('%Y-%m-%d')
time = datetime.datetime.today().strftime('%X')
dir = 'output/' + date + '/'
if not os.path.exists(dir):
os.makedirs(dir)
f = open(dir + provider + '.json', 'w')
data['date'] = date
data['time'] = time
data['label'] = provider
f.write(json.dumps(data))
f.close()
def scrap_rico():
if os.getenv("RICO_USR"):
rico_scrapper = RicoScrapper(os.getenv("RICO_USR"), os.getenv("RICO_PWD"))
res = rico_scrapper.init()
save_output('rico', res)
def scrap_clear():
if os.getenv("CLEAR_CPF"):
clear_scrapper = ClearScrapper(os.getenv("CLEAR_CPF"), os.getenv("CLEAR_PWD"), os.getenv("CLEAR_BIRTHDATE"))
res = clear_scrapper.init()
save_output('clear', res)
def scrap_smartt_bot():
if os.getenv("SMARTT_BOT_USR"):
smartt_bot_scrapper = SmarttBotScrapper(os.getenv("SMARTT_BOT_USR"), os.getenv("SMARTT_BOT_PWD"))
res = smartt_bot_scrapper.init()
save_output('smartt_bot', res)
def scrap_guiabolso():
if os.getenv("GUIABOLSO_USR"):
guiabolso_scrapper = GuiaBolsoScrapper(os.getenv("GUIABOLSO_USR"), os.getenv("GUIABOLSO_PWD"))
res = guiabolso_scrapper.init()
save_output('guiabolso', res)
def scrap_santander():
guiabolso_scrapper = SantanderScrapper(os.getenv("SANTANDER_CPF"), os.getenv("SANTANDER_PWD"),
os.getenv("SANTANDER_LAST_DIGITS"))
res = guiabolso_scrapper.init()
def scrap_all():
scrap_guiabolso()
scrap_clear()
scrap_rico()
scrap_smartt_bot()
# scrap_smartt_bot()
# scrap_clear()
# scrap_guiabolso()
scrap_all()
# scrap_rico()
# scrap_santander()
|
the-stack_0_12238 | import string
from spacy.lang.pl import STOP_WORDS as stop_words
try:
import morfeusz2
morph = morfeusz2.Morfeusz()
except ImportError:
print('Warning: Morfeusz couldn\'t be imported')
morph = None
letters = string.ascii_letters + 'ąćęłńóśźż'
class Word:
def __init__(self, text):
self.text = text
self.lemma = self.lemma()
self.is_stop = self.is_stop()
@classmethod
def from_text(cls, text):
if len(text) == 0:
return None
return cls(text)
@classmethod
def generator(cls, text):
separators = string.whitespace
while len(text) > 0:
positions = {separator: text.find(separator) for separator in separators}
positions = {separator: positions[separator] for separator in separators if positions[separator] > -1}
position_extractor = lambda separator: positions[separator]
next_separator = min(positions, key=position_extractor) if len(positions) > 0 else None
if next_separator is None:
result = cls.from_text(text)
if result is not None:
yield result
return
result = cls.from_text(text[:positions[next_separator] + 1])
if result is not None:
yield result
text = text[positions[next_separator] + 1:]
def lemma(self):
        # Any token that contains a digit is treated as numeric.
        if any(char in string.digits for char in self.text):
            return '#NUMERIC'
main_text = ''.join(char for char in self.text if char in letters)
if morph is None:
return main_text
morph_analysis = morph.analyse(main_text)
if len(morph_analysis) == 0:
return main_text
return morph_analysis[0][2][1].split(':')[0]
def is_stop(self):
return self.text in stop_words or self.lemma in stop_words
def __str__(self):
return self.text
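# A minimal usage sketch (lemmas fall back to the raw letters when Morfeusz is not
# installed, as handled above):
#
#     for word in Word.generator("Ala ma kota"):
#         print(word.text, word.lemma, word.is_stop)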
|
the-stack_0_12241 | """
Streaming Parallel Data Processing
===================================================================
Neuraxle steps for streaming data in parallel in the pipeline
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
from abc import abstractmethod
from multiprocessing import Queue
from multiprocessing.context import Process
from threading import Thread
from typing import Tuple, List, Union, Iterable, Any
from neuraxle.base import NamedTupleList, ExecutionContext, BaseStep, MetaStep, BaseSaver, _FittableStep, \
BaseTransformer, NonFittableMixin
from neuraxle.data_container import DataContainer, ListDataContainer, AbsentValuesNullObject
from neuraxle.pipeline import Pipeline, MiniBatchSequentialPipeline, Joiner
from neuraxle.steps.numpy import NumpyConcatenateOuterBatch
class ObservableQueueMixin:
"""
A class to represent a step that can put items in a queue.
It can also notify other queues that have subscribed to him using subscribe.
.. seealso::
:class:`BaseStep`,
:class:`QueuedPipelineTask`,
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def __init__(self, queue):
self.queue = queue
self.observers = []
self._add_observable_queue_step_saver()
def teardown(self):
self.queue = None
return self
def _add_observable_queue_step_saver(self):
if not hasattr(self, 'savers'):
warnings.warn(
                'Please initialize mixins in the right order: ObservableQueueMixin should be initialized '
                'after the savers have been set. Appending the ObservableQueueStepSaver to a new savers '
                'list; saving might fail.'
)
self.savers = [ObservableQueueStepSaver()]
else:
self.savers.append(ObservableQueueStepSaver())
def subscribe(self, observer_queue_worker: 'ObservableQueueMixin') -> 'ObservableQueueMixin':
"""
Subscribe a queue worker.
The subscribed queue workers get notified when :func:`~neuraxle.distributed.streaming.ObservableQueueMixin.notify` is called.
"""
self.observers.append(observer_queue_worker.queue)
return self
def get(self) -> 'QueuedPipelineTask':
"""
Get last item in queue.
"""
return self.queue.get()
def put(self, value: DataContainer):
"""
Put a queued pipeline task in queue.
"""
self.queue.put(QueuedPipelineTask(step_name=self.name, data_container=value.copy()))
def notify(self, value):
"""
Notify all subscribed queue workers
"""
for observer in self.observers:
observer.put(value)
class QueuedPipelineTask(object):
"""
Data object to contain the tasks processed by the queued pipeline.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def __init__(self, data_container, step_name=None):
self.step_name = step_name
self.data_container = data_container
class ObservableQueueStepSaver(BaseSaver):
"""
Saver for observable queue steps.
.. seealso::
:class:`QueueWorker`,
:class:`neuraxle.base.BaseSaver`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def save_step(self, step: BaseTransformer, context: 'ExecutionContext') -> BaseTransformer:
step.queue = None
step.observers = []
return step
def can_load(self, step: BaseTransformer, context: 'ExecutionContext') -> bool:
return True
def load_step(self, step: 'BaseTransformer', context: 'ExecutionContext') -> 'BaseTransformer':
step.queue = Queue()
return step
class QueueWorker(ObservableQueueMixin, MetaStep):
"""
    Start multiple Process or Thread workers that consume items from the queue of batches to process.
It is both an observable, and observer.
It notifies the results of the wrapped step handle transform method.
It receives the next data container to process.
.. seealso::
:class:`Observer`,
:class:`Observable`,
:class:`MetaStepMixin`,
:class:`BaseStep`
"""
def __init__(
self,
wrapped: BaseTransformer,
max_queue_size: int,
n_workers: int,
use_threading: bool,
additional_worker_arguments=None,
use_savers=False
):
if not additional_worker_arguments:
additional_worker_arguments = [[] for _ in range(n_workers)]
MetaStep.__init__(self, wrapped)
ObservableQueueMixin.__init__(self, Queue(maxsize=max_queue_size))
self.use_threading: bool = use_threading
self.workers: List[Process] = []
self.n_workers: int = n_workers
self.observers: List[Queue] = []
self.additional_worker_arguments = additional_worker_arguments
self.use_savers = use_savers
def start(self, context: ExecutionContext):
"""
Start multiple processes or threads with the worker function as a target.
:param context: execution context
:type context: ExecutionContext
:return:
"""
target_function = worker_function
if self.use_savers:
self.save(context, full_dump=True)
target_function = worker_function
self.workers = []
for _, worker_arguments in zip(range(self.n_workers), self.additional_worker_arguments):
if self.use_threading:
p = Thread(target=target_function, args=(self, context, self.use_savers, worker_arguments))
else:
p = Process(target=target_function, args=(self, context, self.use_savers, worker_arguments))
p.daemon = True
p.start()
self.workers.append(p)
def teardown(self):
"""
Stop all processes on teardown.
:return: teardowned self
"""
self.stop()
return self
def stop(self):
"""
Stop all of the workers.
:return:
"""
if not self.use_threading:
[w.terminate() for w in self.workers]
self.workers = []
self.observers = []
def worker_function(queue_worker: QueueWorker, context: ExecutionContext, use_savers: bool,
additional_worker_arguments):
"""
Worker function that transforms the items inside the queue of items to process.
:param queue_worker: step to transform
:param context: execution context
:param use_savers: use savers
:param additional_worker_arguments: any additional arguments that need to be passed to the workers
:return:
"""
step = queue_worker.get_step()
if use_savers:
saved_queue_worker: QueueWorker = context.load(queue_worker.get_name())
step = saved_queue_worker.get_step()
additional_worker_arguments = tuple(
additional_worker_arguments[i: i + 2] for i in range(0, len(additional_worker_arguments), 2)
)
for argument_name, argument_value in additional_worker_arguments:
step.__dict__.update({argument_name: argument_value})
while True:
try:
task: QueuedPipelineTask = queue_worker.get()
summary_id = task.data_container.summary_id
data_container = step.handle_transform(task.data_container, context)
data_container = data_container.set_summary_id(summary_id)
queue_worker.notify(QueuedPipelineTask(step_name=queue_worker.name, data_container=data_container))
except Exception as err:
queue_worker.notify(QueuedPipelineTask(step_name=queue_worker.name, data_container=err))
QueuedPipelineStepsTuple = Union[
BaseTransformer, # step
Tuple[int, BaseTransformer], # (n_workers, step)
Tuple[str, BaseTransformer], # (step_name, step)
Tuple[str, int, BaseTransformer], # (step_name, n_workers, step)
Tuple[str, int, int, BaseTransformer], # (step_name, n_workers, max_queue_size, step)
Tuple[str, int, List[Tuple], BaseTransformer], # (step_name, n_workers, additional_worker_arguments, step)
Tuple[str, int, List[Tuple], BaseTransformer] # (step_name, n_workers, additional_worker_arguments, step)
]
class BaseQueuedPipeline(MiniBatchSequentialPipeline):
"""
Sub class of :class:`Pipeline`.
Transform data in many pipeline steps at once in parallel in the pipeline using multiprocessing Queues.
Example usage :
.. code-block:: python
# step name, step
p = QueuedPipeline([
('step_a', Identity()),
('step_b', Identity()),
], n_workers=1, batch_size=10, max_queue_size=10)
# step name, number of workers, step
p = QueuedPipeline([
('step_a', 1, Identity()),
('step_b', 1, Identity()),
], batch_size=10, max_queue_size=10)
# step name, number of workers, and max size
p = QueuedPipeline([
('step_a', 1, 10, Identity()),
('step_b', 1, 10, Identity()),
], batch_size=10)
# step name, number of workers for each step, and additional argument for each worker
p = QueuedPipeline([
('step_a', 1, [('host', 'host1'), ('host', 'host2')], 10, Identity())
], batch_size=10)
# step name, number of workers for each step, additional argument for each worker, and max size
p = QueuedPipeline([
('step_a', 1, [('host', 'host1'), ('host', 'host2')], 10, Identity())
], batch_size=10)
:param steps: pipeline steps
:param batch_size: number of elements to combine into a single batch
:param n_workers_per_step: number of workers to spawn per step
:param max_queue_size: max number of elements inside the processing queue
:param data_joiner: transformer step to join streamed batches together at the end of the pipeline
:param use_threading: (Optional.) use threading for parallel processing. multiprocessing.context.Process is used by default.
:param use_savers: use savers to serialize steps for parallel processing.
:param include_incomplete_batch: (Optional.) A bool representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
:param default_value_data_inputs: expected_outputs default fill value
for padding and values outside iteration range, or :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`
to trim absent values from the batch
:param default_value_expected_outputs: expected_outputs default fill value
for padding and values outside iteration range, or :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`
to trim absent values from the batch
:param cache_folder: cache_folder if its at the root of the pipeline
.. seealso::
:class:`QueueWorker`,
:class:`QueueJoiner`,
:class:`CustomPipelineMixin`,
:class:`Pipeline`
"""
def __init__(
self,
steps: List[QueuedPipelineStepsTuple],
batch_size: int,
n_workers_per_step: int = None,
max_queue_size: int = None,
data_joiner = None,
use_threading: bool = False,
use_savers: bool = False,
include_incomplete_batch: bool = False,
default_value_data_inputs: Union[Any, AbsentValuesNullObject] = None,
default_value_expected_outputs: Union[Any, AbsentValuesNullObject] = None,
cache_folder: str = None,
):
if data_joiner is None:
data_joiner = NumpyConcatenateOuterBatch()
self.data_joiner = data_joiner
self.max_queue_size = max_queue_size
self.batch_size = batch_size
self.n_workers_per_step = n_workers_per_step
self.use_threading = use_threading
self.use_savers = use_savers
self.batch_size: int = batch_size
self.include_incomplete_batch: bool = include_incomplete_batch
self.default_value_data_inputs: Union[Any, AbsentValuesNullObject] = default_value_data_inputs
self.default_value_expected_outputs: Union[Any, AbsentValuesNullObject] = default_value_expected_outputs
MiniBatchSequentialPipeline.__init__(
self,
steps=self._initialize_steps_as_tuple(steps),
cache_folder=cache_folder,
batch_size=batch_size,
include_incomplete_batch=include_incomplete_batch,
default_value_data_inputs=default_value_data_inputs,
default_value_expected_outputs=default_value_expected_outputs
)
self._refresh_steps()
def _initialize_steps_as_tuple(self, steps):
"""
Wrap each step by a :class:`QueueWorker` to allow data to flow in many pipeline steps at once in parallel.
:param steps: (name, n_workers, step)
:type steps: NameNWorkerStepTupleList
:return: steps as tuple
:rtype: NamedTupleList
"""
steps_as_tuple: NamedTupleList = []
for step in steps:
queue_worker = self._create_queue_worker(step)
steps_as_tuple.append((queue_worker.name, queue_worker))
steps_as_tuple.append(('queue_joiner', QueueJoiner(batch_size=self.batch_size)))
return steps_as_tuple
def _create_queue_worker(self, step: QueuedPipelineStepsTuple):
name, n_workers, additional_worker_arguments, max_queue_size, actual_step = self._get_step_params(step)
return QueueWorker(
actual_step,
n_workers=n_workers,
use_threading=self.use_threading,
max_queue_size=max_queue_size,
additional_worker_arguments=additional_worker_arguments,
use_savers=self.use_savers
).set_name('QueueWorker{}'.format(name))
def _get_step_params(self, step):
"""
Return all params necessary to create the QueuedPipeline for the given step.
:param step: tuple
:type step: QueuedPipelineStepsTupleList
:return: return name, n_workers, max_queue_size, actual_step
:rtype: tuple(str, int, int, BaseStep)
"""
if isinstance(step, BaseTransformer):
actual_step = step
name = step.name
max_queue_size = self.max_queue_size
n_workers = self.n_workers_per_step
additional_arguments = []
elif len(step) == 2:
if isinstance(step[0], str):
name, actual_step = step
n_workers = self.n_workers_per_step
else:
n_workers, actual_step = step
name = actual_step.name
max_queue_size = self.max_queue_size
additional_arguments = []
elif len(step) == 3:
name, n_workers, actual_step = step
max_queue_size = self.max_queue_size
additional_arguments = []
elif len(step) == 4:
if isinstance(step[2], Iterable):
name, n_workers, additional_arguments, actual_step = step
max_queue_size = self.max_queue_size
else:
name, n_workers, max_queue_size, actual_step = step
additional_arguments = []
elif len(step) == 5:
name, n_workers, additional_arguments, max_queue_size, actual_step = step
else:
raise Exception('Invalid Queued Pipeline Steps Shape.')
return name, n_workers, additional_arguments, max_queue_size, actual_step
def _will_process(self, data_container: DataContainer, context: ExecutionContext) -> (
DataContainer, ExecutionContext):
"""
Setup streaming pipeline before any handler methods.
:param data_container: data container
:param context: execution context
:return:
"""
self.setup(context=context)
return data_container, context
def setup(self, context: ExecutionContext = None) -> 'BaseTransformer':
"""
Connect the queued workers together so that the data can correctly flow through the pipeline.
:param context: execution context
:return: step
:rtype: BaseStep
"""
if not self.is_initialized:
self.connect_queued_pipeline()
super().setup(context=context)
return self
def fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> (
'Pipeline', DataContainer):
"""
Fit transform sequentially if any step is fittable. Otherwise transform in parallel.
:param data_container: data container
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return:
"""
all_steps_are_not_fittable = True
for _, step in self[:-1]:
if isinstance(step.get_step(), _FittableStep) and not isinstance(step.get_step(), NonFittableMixin):
all_steps_are_not_fittable = False
if all_steps_are_not_fittable:
data_container = self.transform_data_container(data_container, context)
data_container = self._did_transform(data_container, context)
return self, data_container
self.is_invalidated = True
return super().fit_transform_data_container(data_container, context)
def transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Transform data container
:param data_container: data container to transform.
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return: data container
"""
data_container_batches = data_container.minibatches(
batch_size=self.batch_size,
include_incomplete_batch=self.include_incomplete_batch,
default_value_data_inputs=self.default_value_data_inputs,
default_value_expected_outputs=self.default_value_expected_outputs
)
n_batches = self.get_n_batches(data_container)
self[-1].set_n_batches(n_batches)
for name, step in self[:-1]:
step.start(context)
batch_index = 0
for data_container_batch in data_container_batches:
self.send_batch_to_queued_pipeline(batch_index=batch_index, data_container=data_container_batch)
batch_index += 1
data_container = self[-1].join(original_data_container=data_container)
return data_container
def _did_transform(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Stop all of the workers after transform. Also, join the data using self.data_joiner.
:param data_container: data container
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return: data container
:rtype: DataContainer
"""
for name, step in self[:-1]:
step.stop()
return self.data_joiner.handle_transform(data_container, context)
@abstractmethod
def get_n_batches(self, data_container) -> int:
"""
Get the total number of batches that the queue joiner is supposed to receive.
:param data_container: data container to transform
:type data_container: DataContainer
:return:
"""
raise NotImplementedError()
@abstractmethod
def connect_queued_pipeline(self):
"""
Connect all the queued workers together so that the data can flow through each step.
:return:
"""
raise NotImplementedError()
@abstractmethod
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to queued pipeline. It is blocking if there is no more space available in the multiprocessing queues.
Workers might return batches in a different order, but the queue joiner will reorder them at the end.
The queue joiner will use the summary ids to reorder all of the received batches.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
raise NotImplementedError()
class SequentialQueuedPipeline(BaseQueuedPipeline):
"""
Using :class:`QueueWorker`, run all steps sequentially even if they are in separate processes or threads.
.. seealso::
:func:`~neuraxle.data_container.DataContainer.minibatches`,
:class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`,
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`QueueJoiner`,
:class:`Observer`,
:class:`Observable`
"""
def get_n_batches(self, data_container) -> int:
"""
Get the number of batches to process.
:param data_container: data container to transform
:return: number of batches
"""
return data_container.get_n_batches(
batch_size=self.batch_size,
include_incomplete_batch=self.include_incomplete_batch
)
def connect_queued_pipeline(self):
"""
Sequentially connect of the queued workers.
:return:
"""
for i, (name, step) in enumerate(self[1:]):
self[i].subscribe(step)
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to process to the first queued worker.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
data_container = data_container.set_summary_id(data_container.hash_summary())
self[-1].summary_ids.append(data_container.summary_id)
self[0].put(data_container)
class ParallelQueuedFeatureUnion(BaseQueuedPipeline):
"""
Using :class:`QueueWorker`, run all steps in parallel using QueueWorkers.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`SequentialQueuedPipeline`,
:class:`QueueJoiner`,
:class:`Observer`,
:class:`Observable`
"""
def get_n_batches(self, data_container):
"""
Get the number of batches to process by the queue joiner.
:return:
"""
return data_container.get_n_batches(self.batch_size) * (len(self) - 1)
def connect_queued_pipeline(self):
"""
Connect the queue joiner to all of the queued workers to process data in parallel.
:return:
"""
for name, step in self[:-1]:
step.subscribe(self[-1])
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to process to all of the queued workers.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
for name, step in self[:-1]:
data_container = data_container.set_summary_id(data_container.hash_summary())
self[-1].summary_ids.append(data_container.summary_id)
step.put(data_container)
class QueueJoiner(ObservableQueueMixin, Joiner):
"""
Observe the results of the queue worker of type :class:`QueueWorker`.
Synchronize all of the workers together.
.. seealso::
:class:`QueuedPipeline`,
:class:`Observer`,
:class:`ListDataContainer`,
:class:`DataContainer`
"""
def __init__(self, batch_size, n_batches=None):
self.n_batches_left_to_do = n_batches
self.summary_ids = []
self.result = {}
Joiner.__init__(self, batch_size=batch_size)
ObservableQueueMixin.__init__(self, Queue())
def teardown(self) -> 'BaseTransformer':
"""
Properly clean queue, summary ids, and results during teardown.
:return: teardowned self
"""
ObservableQueueMixin.teardown(self)
Joiner.teardown(self)
self.summary_ids = []
self.result = {}
return self
def set_n_batches(self, n_batches):
self.n_batches_left_to_do = n_batches
def join(self, original_data_container: DataContainer) -> DataContainer:
"""
Return the accumulated results received by the on next method of this observer.
:return: transformed data container
:rtype: DataContainer
"""
while self.n_batches_left_to_do > 0:
task: QueuedPipelineTask = self.queue.get()
self.n_batches_left_to_do -= 1
step_name = task.step_name
if step_name not in self.result:
if not isinstance(task.data_container, DataContainer):
summary_id = None
else:
summary_id = task.data_container.summary_id
self.result[step_name] = ListDataContainer(
current_ids=[],
data_inputs=[],
expected_outputs=[],
summary_id=summary_id
)
self.result[step_name].append_data_container_in_data_inputs(task.data_container)
data_containers = self._join_all_step_results()
self.result = {}
return original_data_container.set_data_inputs(data_containers)
def _join_all_step_results(self):
"""
Concatenate all resulting data containers together.
:return:
"""
results = []
for step_name, data_containers in self.result.items():
self._raise_exception_throwned_by_workers_if_needed(data_containers)
step_results = self._join_step_results(data_containers)
results.append(step_results)
return results
def _raise_exception_throwned_by_workers_if_needed(self, data_containers):
for dc in data_containers.data_inputs:
if isinstance(dc, Exception):
                # an exception has been thrown by the worker, so re-raise it here
exception = dc
raise exception
def _join_step_results(self, data_containers):
# reorder results by summary id
data_containers.data_inputs.sort(key=lambda dc: self.summary_ids.index(dc.summary_id))
step_results = ListDataContainer.empty()
for data_container in data_containers.data_inputs:
data_container = data_container.set_summary_id(data_containers.data_inputs[-1].summary_id)
step_results.concat(data_container)
return step_results
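# A minimal usage sketch (mirrors the docstring examples of BaseQueuedPipeline;
# Identity is assumed to be neuraxle's identity step):
#
#     p = SequentialQueuedPipeline([
#         ('step_a', 1, Identity()),
#         ('step_b', 1, Identity()),
#     ], batch_size=10, max_queue_size=10)
#     outputs = p.transform(list(range(100)))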
|
the-stack_0_12242 | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, index, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
        index (int)          --  the index of the image in the batch to convert and display
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[index].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.  # post-processing: transpose and scaling
        # image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new |
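# A minimal usage sketch of EMA (model names are hypothetical; it applies the
# old * beta + (1 - beta) * new update implemented above):
#
#     ema = EMA(0.999)
#     for ema_p, p in zip(ema_model.parameters(), model.parameters()):
#         ema_p.data = ema.update_average(ema_p.data, p.data)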
the-stack_0_12244 | import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from skimage import morphology
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
def get_roc_plot_and_threshold(predictions, gt_list):
# calculate image-level ROC AUC score
# img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
predictions = np.asarray(predictions)
gt_list = np.asarray(gt_list)
fpr, tpr, thresholds = roc_curve(gt_list, predictions)
img_roc_auc = roc_auc_score(gt_list, predictions)
fig, ax = plt.subplots(1, 1)
fig_img_rocauc = ax
fig_img_rocauc.plot(fpr, tpr, label="ROC Curve (area = {:.2f})".format(img_roc_auc))
ax.set_xlabel("FPR")
ax.set_ylabel("TPR")
ax.set_title('Receiver operating characteristic')
ax.legend(loc="lower right")
precision, recall, thresholds = precision_recall_curve(gt_list, predictions)
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
best_threshold = thresholds[np.argmax(f1)]
return (fig, ax), best_threshold
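# A minimal usage sketch (image-level anomaly scores and binary ground-truth labels
# are hypothetical):
#
#     (fig, ax), threshold = get_roc_plot_and_threshold(img_scores, gt_labels)
#     fig.savefig("roc_curve.png")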
def _embedding_concat(x, y):
B, C1, H1, W1 = x.size()
_, C2, H2, W2 = y.size()
s = int(H1 / H2)
x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
x = x.view(B, C1, -1, H2, W2)
z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
for i in range(x.size(2)):
z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
z = z.view(B, -1, H2 * W2)
z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
return z
def get_embedding(features_1, features_2, features_3, embedding_ids, device):
embedding = features_1
embedding = _embedding_concat(embedding, features_2).to(device)
embedding = _embedding_concat(embedding, features_3).to(device)
# Select a random amount of embeddings
embedding = torch.index_select(embedding, dim=1, index=embedding_ids)
return embedding
def create_mask(img_score: np.ndarray, threshold):
idx_above_threshold = img_score > threshold
idx_below_threshold = img_score <= threshold
mask = img_score
mask[idx_above_threshold] = 1
mask[idx_below_threshold] = 0
kernel = morphology.disk(4)
mask = morphology.opening(mask, kernel)
# mask *= 255
return mask
|
the-stack_0_12246 | """
CEASIOMpy: Conceptual Aircraft Design Software
Developed by CFS ENGINEERING, 1015 Lausanne, Switzerland
Module containing the utility functions for the workflowcreator and optimization modules
Python version: >=3.6
| Author: Aidan Jungo
| Creation: 2020-02-25
| Last modification: 2020-04-24
TODO:
* ...
"""
#==============================================================================
# IMPORTS
#==============================================================================
import os
import subprocess
import shutil
import ceasiompy.utils.moduleinterfaces as mi
from ceasiompy.SettingsGUI.settingsgui import create_settings_gui
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
import ceasiompy.__init__
LIB_DIR = os.path.dirname(ceasiompy.__init__.__file__)
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
MODULE_NAME = os.path.basename(os.getcwd())
SU2_XPATH = '/cpacs/toolspecific/CEASIOMpy/aerodynamics/su2'
#==============================================================================
# FUNCTIONS
#==============================================================================
def copy_module_to_module(module_from, io_from, module_to, io_to):
""" Transfer CPACS file from one module to another.
    Function 'copy_module_to_module' copies the CPACS file from ToolInput or
ToolOutput of 'module_from' to ToolInput or ToolOutput of 'module_to'
Args:
module_from (str): Name of the module the CPACS file is copy from
io_from (str): "in" or "out", for ToolInput or ToolOutput
module_to (str): Name of the module where the CPACS file will be copy
io_to (str): "in" or "out", for ToolInput or ToolOutput
"""
in_list = ['in','In','IN','iN','input','Input','INPUT','ToolInput','toolinput']
if io_from in in_list:
file_copy_from = mi.get_toolinput_file_path(module_from)
else: # 'out' or anything else ('out' by default)
file_copy_from = mi.get_tooloutput_file_path(module_from)
log.info('Copy CPACS from:'+ file_copy_from)
if io_to in in_list:
file_copy_to = mi.get_toolinput_file_path(module_to)
else: # 'out' or anything else ('out' by default)
file_copy_to = mi.get_tooloutput_file_path(module_to)
log.info('Copy CPACS to:'+ file_copy_to)
shutil.copy(file_copy_from,file_copy_to)
def run_subworkflow(module_to_run,cpacs_path_in='',cpacs_path_out=''):
"""Function to run a list of module in order.
    Function 'run_subworkflow' will execute in order all the modules contained
    in the 'module_to_run' list. Each time, the results of one module (generally a CPACS
    file) will be copied as input for the next module.
Args:
module_to_run (list): List of mododule to run (in order)
cpacs_path_in (str): Path of the CPACS file use, if not already in the
ToolInput folder of the first submodule
cpacs_path_out (str): Path of the output CPACS file use, if not already
in the ToolInput folder of the first submodule
"""
if not module_to_run:
log.info('No module to run')
return 0
# Check non existing module
submodule_list = mi.get_submodule_list()
for module in module_to_run:
if module not in submodule_list:
raise ValueError('No module named "' + module + '"!')
    # Copy the input CPACS file into the first module's ToolInput
if cpacs_path_in:
shutil.copy(cpacs_path_in,mi.get_toolinput_file_path(module_to_run[0]))
log.info('The following modules will be executed: ' + str(module_to_run))
for m, module in enumerate(module_to_run):
log.info('\n')
log.info('######################################################################################')
log.info('Run module: ' + module)
log.info('######################################################################################\n')
# Go to the module directory
module_path = os.path.join(LIB_DIR,module)
print('\n Going to ',module_path,'\n')
os.chdir(module_path)
# Copy CPACS file from previous module to this one
if m > 0:
copy_module_to_module(module_to_run[m-1],'out',module,'in')
if module == 'SettingsGUI':
cpacs_path = mi.get_toolinput_file_path(module)
cpacs_out_path = mi.get_tooloutput_file_path(module)
create_settings_gui(cpacs_path,cpacs_out_path,module_to_run[m:])
else:
# Find the python file to run
for file in os.listdir(module_path):
if file.endswith('.py'):
if not file.startswith('__'):
main_python = file
# Run the module
error = subprocess.call(['python',main_python])
if error:
raise ValueError('An error ocured in the module '+ module)
    # Copy the resulting CPACS file from the last module to the output path
if cpacs_path_out:
shutil.copy(mi.get_tooloutput_file_path(module_to_run[-1]),cpacs_path_out)
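# A minimal usage sketch (module names and CPACS paths are hypothetical):
#
#     run_subworkflow(['SettingsGUI', 'WeightConventional'],
#                     cpacs_path_in='ToolInput/ToolInput.xml',
#                     cpacs_path_out='ToolOutput/ToolOutput.xml')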
|
the-stack_0_12247 | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
lookup_fields = lookup_path.split('__')
# Remove the last item of the lookup path if it is a query term
if lookup_fields[-1] in QUERY_TERMS:
lookup_fields = lookup_fields[:-1]
# Now go through the fields (following all relations) and look for an m2m
for field_name in lookup_fields:
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info'):
# This field is a relation, update opts to follow the relation
path_info = field.get_path_info()
opts = path_info[-1].to_opts
if any(path.m2m for path in path_info):
# This field is a m2m relation so we know we need to call distinct
return True
return False
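# For example (a hypothetical model with a many-to-many 'tags' relation):
#
#     lookup_needs_distinct(MyModel._meta, 'tags__name__icontains')  # -> True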
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
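# For example (a small sketch of the behaviour implemented above):
#
#     prepare_lookup_value('id__in', '1,2,3')        # -> ['1', '2', '3']
#     prepare_lookup_value('name__isnull', 'false')  # -> False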
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
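# Hedged usage sketch (added for illustration; the queryset and the 'default'
# database alias are assumptions):
#   collector = NestedObjects(using='default')
#   collector.collect(Book.objects.filter(pk=1))
#   tree = collector.nested()  # nested list of instances, suitable for the unordered_list filter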
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
"""
field = opts.get_field(name)
if field.is_relation and field.many_to_one and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine field. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.related_model._meta.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, (models.IntegerField, models.FloatField)):
return formats.number_format(value)
elif isinstance(field, models.FileField) and value:
return format_html('<a href="{}">{}</a>', value.url, value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.remote_field.model
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
|
the-stack_0_12248 | import datetime
import minerl
import namesgenerator
from sacred import Experiment
import basalt_utils.wrappers as wrapper_utils
from minerl.herobraine.wrappers.video_recording_wrapper import VideoRecordingWrapper
from basalt_utils.sb3_compat.policies import SpaceFlatteningActorCriticPolicy
from basalt_utils.sb3_compat.cnns import MAGICALCNN
from basalt_utils.wrappers import SaveObsAndActions
from basalt_utils.callbacks import BatchEndIntermediateRolloutEvaluator, MultiCallback, BCModelSaver
from stable_baselines3.common.policies import ActorCriticCnnPolicy
from stable_baselines3.common.vec_env import DummyVecEnv
import collections
from imitation.algorithms.bc import BC
import imitation.data.rollout as il_rollout
import logging
import torch as th
from basalt_utils import utils
import os
import imitation.util.logger as imitation_logger
from sacred.observers import FileStorageObserver
from stable_baselines3.common.utils import get_device
from time import time
bc_baseline = Experiment("basalt_bc_baseline")
WRAPPERS = [# Maps from a string version of enum (found in the dataset) to an int version (expected for spaces.Discrete)
(wrapper_utils.EnumStrToIntWrapper, dict()),
# Transforms continuous camera action into discrete up/down/no-change buckets on both pitch and yaw
(wrapper_utils.CameraDiscretizationWrapper, dict()),
# Flattens a Dict action space into a Box, but retains memory of how to expand back out
(wrapper_utils.ActionFlatteningWrapper, dict()),
# Pull out only the POV observation from the observation space; transpose axes for SB3 compatibility
(utils.ExtractPOVAndTranspose, dict())] #,
def make_unique_timestamp() -> str:
"""Make a timestamp along with a random word descriptor: e.g. 2021-06-06_1236_boring_wozniac"""
ISO_TIMESTAMP = "%Y%m%d_%H%M"
timestamp = datetime.datetime.now().strftime(ISO_TIMESTAMP)
return f"{timestamp}_{namesgenerator.get_random_name()}"
@bc_baseline.config
def default_config():
task_name = "MineRLBasaltFindCave-v0"
train_batches = None
train_epochs = None
log_interval = 1
# TODO fix this
data_root = os.getenv('MINERL_DATA_ROOT')
# SpaceFlatteningActorCriticPolicy is a policy that supports a flattened Dict action space by
# maintaining multiple sub-distributions and merging their results
policy_class = SpaceFlatteningActorCriticPolicy
wrappers = WRAPPERS
save_dir_base = "results/"
save_dir = None
policy_filename = 'trained_policy.pt'
use_rollout_callback = False
rollout_callback_batch_interval = 1000
policy_save_interval = 1000
callback_rollouts = 5
save_videos = True
mode = 'train'
test_policy_path = 'train/trained_policy.pt'
test_n_rollouts = 5
# Note that `batch_size` needs to be less than the number of trajectories available for the task you're training on
batch_size = 32
n_traj = None
buffer_size = 15000
lr = 1e-4
_ = locals()
del _
@bc_baseline.config
def default_save_dir(save_dir_base, save_dir, task_name):
"""
Calculates a save directory by combining the base `save_dir` ("results" by default) with
the task name and a timestamp that contains both the time and a random name
"""
if save_dir is None:
save_dir = os.path.join(save_dir_base, task_name, make_unique_timestamp())
_ = locals()
del _
@bc_baseline.named_config
def normal_policy_class():
"""
This is a sacred named_config, which means that when `normal_policy_class` is added as a parameter
to a call of this experiment, the policy class will be set to ActorCriticCnnPolicy
"Normal" here is just used to mean the default CNN policy from Stable Baselines, rather than the one explicitly designed
to deal with multimodal action spaces (SpaceFlatteningActorCriticPolicy)
"""
policy_class = ActorCriticCnnPolicy
_ = locals()
del _
@bc_baseline.main
def main(mode):
if mode == 'train':
train_bc()
if mode == 'test':
test_bc()
@bc_baseline.capture
def test_bc(task_name, data_root, wrappers, test_policy_path, test_n_rollouts, save_dir):
os.makedirs(save_dir, exist_ok=True)
# Add a wrapper to the environment that records video and saves it in the
# the `save_dir` we have constructed for this run.
wrappers = [(VideoRecordingWrapper, {'video_directory':
os.path.join(save_dir, 'videos')}),
(SaveObsAndActions, {'save_dir':
os.path.join(save_dir, 'obs_and_actions')})] + wrappers
data_pipeline, wrapped_env = utils.get_data_pipeline_and_env(task_name, data_root, wrappers, dummy=False)
vec_env = DummyVecEnv([lambda: wrapped_env])
policy = th.load(test_policy_path, map_location=th.device(get_device('auto')))
trajectories = il_rollout.generate_trajectories(policy, vec_env, il_rollout.min_episodes(test_n_rollouts))
stats = il_rollout.rollout_stats(trajectories)
stats = collections.OrderedDict([(key, stats[key])
for key in sorted(stats)])
# print it out
kv_message = '\n'.join(f" {key}={value}"
for key, value in stats.items())
logging.info(f"Evaluation stats on '{task_name}': {kv_message}")
@bc_baseline.capture
def train_bc(task_name, batch_size, data_root, wrappers, train_epochs, n_traj, lr,
policy_class, train_batches, log_interval, save_dir, policy_filename,
use_rollout_callback, rollout_callback_batch_interval, callback_rollouts, save_videos,
buffer_size, policy_save_interval):
# This code is designed to let you either train for a fixed number of batches, or for a fixed number of epochs
assert train_epochs is None or train_batches is None, \
"Only one of train_batches or train_epochs should be set"
assert not (train_batches is None and train_epochs is None), \
"You cannot have both train_batches and train_epochs set to None"
# If you've set the `save_videos` flag, add a VideoRecordingWrapper with a directory set
# to the current `save_dir` to the environment wrappers
if save_videos:
wrappers = [(VideoRecordingWrapper, {'video_directory':
os.path.join(save_dir, 'videos')}),
(SaveObsAndActions, {'save_dir':
os.path.join(save_dir, 'obs_and_actions')})] + wrappers
# This `get_data_pipeline_and_env` utility is designed to be shared across multiple baselines
# It takes in a task name, data root, and set of wrappers and returns
# (1) An env object with the same environment spaces as you'd getting from making the env associated
# with this task and wrapping it in `wrappers`. Depending on the parameter passed into `dummy`, this is
# either the real wrapped environment, or a dummy environment that displays the same spaces,
# but without having to actually start up Minecraft
# (2) A MineRL DataPipeline that can be used to construct a batch_iter used by BC, and also as a handle to clean
# up that iterator after training.
data_pipeline, wrapped_env = utils.get_data_pipeline_and_env(task_name, data_root, wrappers,
dummy=not use_rollout_callback)
# This utility creates a data iterator that is basically a light wrapper around the baseline MineRL data iterator
# that additionally:
# (1) Applies all observation and action transformations specified by the wrappers in `wrappers`, and
# (2) Calls `np.squeeze` recursively on all the nested dict spaces to remove the sequence dimension, since we're
# just doing single-frame BC here
data_iter = utils.create_data_iterator(wrapped_env,
data_pipeline=data_pipeline,
batch_size=batch_size,
num_epochs=train_epochs,
num_batches=train_batches,
buffer_size=buffer_size)
if policy_class == SpaceFlatteningActorCriticPolicy:
policy = policy_class(observation_space=wrapped_env.observation_space,
action_space=wrapped_env.action_space,
env=wrapped_env,
lr_schedule=lambda _: 1e-4,
features_extractor_class=MAGICALCNN)
else:
policy = policy_class(observation_space=wrapped_env.observation_space,
action_space=wrapped_env.action_space,
lr_schedule=lambda _: 1e-4,
features_extractor_class=MAGICALCNN)
os.makedirs(save_dir, exist_ok=True)
imitation_logger.configure(save_dir, ["stdout", "tensorboard"])
callbacks = [BCModelSaver(policy=policy,
save_dir=os.path.join(save_dir, 'policy_checkpoints'),
save_interval_batches=policy_save_interval)]
if use_rollout_callback:
callbacks.append(BatchEndIntermediateRolloutEvaluator(policy=policy,
env=wrapped_env,
save_dir=os.path.join(save_dir, 'policy_rollouts'),
evaluate_interval_batches=rollout_callback_batch_interval,
n_rollouts=callback_rollouts))
callback_op = MultiCallback(callbacks)
bc_trainer = BC(
observation_space=wrapped_env.observation_space,
action_space=wrapped_env.action_space,
policy_class= lambda **kwargs: policy,
policy_kwargs=None,
expert_data=data_iter,
device='auto',
optimizer_cls=th.optim.Adam,
optimizer_kwargs=dict(lr=lr),
ent_weight=1e-3,
l2_weight=1e-5)
bc_trainer.train(n_epochs=train_epochs,
n_batches=train_batches,
log_interval=log_interval,
on_batch_end=callback_op)
bc_trainer.save_policy(policy_path=os.path.join(save_dir, policy_filename))
bc_baseline.add_artifact(os.path.join(save_dir, policy_filename))
bc_baseline.log_scalar(f'run_location={save_dir}', 1)
print("Training complete; cleaning up data pipeline!")
data_iter.close()
if __name__ == "__main__":
bc_baseline.observers.append(FileStorageObserver("sacred_results"))
bc_baseline.run_commandline()
|
the-stack_0_12249 | import asyncio
import typing
import warnings
from ..utils.logger import logger
from .auto_reload import _auto_reload
CallableAwaitable = typing.Union[typing.Callable, typing.Awaitable]
class TaskManager:
def __init__(
self,
loop: asyncio.AbstractEventLoop = None,
*,
on_shutdown: typing.Callable = None,
on_startup: typing.Callable = None,
auto_reload: bool = False,
auto_reload_dir: str = ".",
asyncio_debug_mode: bool = False,
):
self.tasks: typing.List[typing.Callable] = []
self.loop: asyncio.AbstractEventLoop = loop or asyncio.get_event_loop()
self.on_shutdown: CallableAwaitable = on_shutdown
self.on_startup: CallableAwaitable = on_startup
self.auto_reload: bool = auto_reload
self.auto_reload_dir: str = auto_reload_dir
self.loop.set_debug(asyncio_debug_mode)
def run(
self, **abandoned,
):
if len(abandoned):
warnings.warn("Pass options through __init__")
for option in abandoned:
setattr(self, option, abandoned[option])
if len(self.tasks) < 1:
raise RuntimeError("Count of tasks - 0. Add tasks.")
try:
if self.on_startup is not None:
self.loop.run_until_complete(self.on_startup())
if self.auto_reload:
self.loop.create_task(_auto_reload(self.auto_reload_dir))
[self.loop.create_task(task) for task in self.tasks]
self.loop.run_forever()
except KeyboardInterrupt:
logger.info("Keyboard Interrupt")
self.close()
finally:
if self.on_shutdown is not None:
self.loop.run_until_complete(self.on_shutdown())
if not self.loop.is_running():
self.close()
def close(self):
self.loop.close()
def add_task(self, task: typing.Union[typing.Coroutine, typing.Callable]):
if asyncio.iscoroutinefunction(task):
self.tasks.append(task())
elif asyncio.iscoroutine(task):
self.tasks.append(task)
else:
raise RuntimeError("Unexpected task. Tasks may be only coroutine functions")
def run_task(self, task: typing.Union[typing.Coroutine, typing.Callable]):
if asyncio.iscoroutinefunction(task):
self.loop.create_task(task())
elif asyncio.iscoroutine(task):
self.loop.create_task(task)
else:
raise RuntimeError("Unexpected task. Tasks may be only coroutine functions")
|
the-stack_0_12250 | from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tab_mlp import MLP
from pytorch_widedeep.models.transformers._encoders import SaintEncoder
from pytorch_widedeep.models.transformers._embeddings_layers import (
CatAndContEmbeddings,
)
class SAINT(nn.Module):
r"""Defines a ``SAINT`` model
(`arXiv:2106.01342 <https://arxiv.org/abs/2106.01342>`_) that can be used
as the ``deeptabular`` component of a Wide & Deep model.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
{'education': 0, 'relationship': 1, 'workclass': 2, ...}
embed_input: List
List of Tuples with the column name and number of unique values
e.g. [('education', 11), ...]
embed_dropout: float, default = 0.1
Dropout to be applied to the embeddings matrix
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
:obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If ``full_embed_dropout = True``, ``embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the
`TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns'`. In other
words, the idea is to let the model learn which column is embedded
at the time.
add_shared_embed: bool, default = False
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
``frac_shared_embed`` with the shared embeddings.
See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if ``add_shared_embed
= False``) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
embed_continuous_activation: str, default = None
String indicating the activation function to be applied to the
continuous embeddings, if any. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported.
cont_norm_layer: str, default = None,
Type of normalization layer applied to the continuous features before
they are embedded. Options are: ``layernorm``, ``batchnorm`` or
``None``.
input_dim: int, default = 32
The so-called *dimension of the model*. In general is the number of
embeddings used to encode the categorical and/or continuous columns
n_heads: int, default = 8
Number of attention heads per Transformer block
use_bias: bool, default = False
Boolean indicating whether or not to use bias in the Q, K, and V
projection layers
n_blocks: int, default = 2
Number of SAINT-Transformer blocks. 1 in the paper.
    attn_dropout: float, default = 0.1
        Dropout that will be applied to the Multi-Head Attention column and
        row layers
    ff_dropout: float, default = 0.2
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "gelu"
Transformer Encoder activation function. ``tanh``, ``relu``,
``leaky_relu``, ``gelu``, ``geglu`` and ``reglu`` are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to ``[l, 4*l,
2*l]`` where ``l`` is the MLP input dimension
mlp_activation: str, default = "relu"
MLP activation function. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
mlp_linear_first: bool, default = False
Boolean indicating whether the order of the operations in the dense
layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->
LIN -> ACT]``
Attributes
----------
cat_and_cont_embed: ``nn.Module``
This is the module that processes the categorical and continuous columns
transformer_blks: ``nn.Sequential``
Sequence of SAINT-Transformer blocks
transformer_mlp: ``nn.Module``
MLP component in the model
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the WideDeep class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import SAINT
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = SAINT(column_idx=column_idx, embed_input=embed_input, continuous_cols=continuous_cols)
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
embed_input: Optional[List[Tuple[str, int]]] = None,
embed_dropout: float = 0.1,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: float = 0.25,
continuous_cols: Optional[List[str]] = None,
embed_continuous_activation: str = None,
cont_norm_layer: str = None,
input_dim: int = 32,
use_bias: bool = False,
n_heads: int = 8,
n_blocks: int = 2,
attn_dropout: float = 0.1,
ff_dropout: float = 0.2,
transformer_activation: str = "gelu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
):
super(SAINT, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.embed_dropout = embed_dropout
self.full_embed_dropout = full_embed_dropout
self.shared_embed = shared_embed
self.add_shared_embed = add_shared_embed
self.frac_shared_embed = frac_shared_embed
self.continuous_cols = continuous_cols
self.embed_continuous_activation = embed_continuous_activation
self.cont_norm_layer = cont_norm_layer
self.input_dim = input_dim
self.use_bias = use_bias
self.n_heads = n_heads
self.n_blocks = n_blocks
self.attn_dropout = attn_dropout
self.ff_dropout = ff_dropout
self.transformer_activation = transformer_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_activation = mlp_activation
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
self.with_cls_token = "cls_token" in column_idx
self.n_cat = len(embed_input) if embed_input is not None else 0
self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
self.n_feats = self.n_cat + self.n_cont
self.cat_and_cont_embed = CatAndContEmbeddings(
input_dim,
column_idx,
embed_input,
embed_dropout,
full_embed_dropout,
shared_embed,
add_shared_embed,
frac_shared_embed,
False, # use_embed_bias
continuous_cols,
True, # embed_continuous,
embed_continuous_activation,
True, # use_cont_bias
cont_norm_layer,
)
self.transformer_blks = nn.Sequential()
for i in range(n_blocks):
self.transformer_blks.add_module(
"saint_block" + str(i),
SaintEncoder(
input_dim,
n_heads,
use_bias,
attn_dropout,
ff_dropout,
transformer_activation,
self.n_feats,
),
)
attn_output_dim = (
self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
)
if not mlp_hidden_dims:
mlp_hidden_dims = [
attn_output_dim,
attn_output_dim * 4,
attn_output_dim * 2,
]
else:
assert mlp_hidden_dims[0] == attn_output_dim, (
f"The input dim of the MLP must be {attn_output_dim}. "
f"Got {mlp_hidden_dims[0]} instead"
)
self.transformer_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
x_cat, x_cont = self.cat_and_cont_embed(X)
if x_cat is not None:
x = x_cat
if x_cont is not None:
x = torch.cat([x, x_cont], 1) if x_cat is not None else x_cont
x = self.transformer_blks(x)
if self.with_cls_token:
x = x[:, 0, :]
else:
x = x.flatten(1)
return self.transformer_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights. Each element of the list is a tuple
where the first and the second elements are the column and row
attention weights respectively
The shape of the attention weights is:
- column attention: :math:`(N, H, F, F)`
- row attention: :math:`(1, H, N, N)`
where *N* is the batch size, *H* is the number of heads and *F* is the
number of features/columns in the dataset
"""
attention_weights = []
for blk in self.transformer_blks:
attention_weights.append(
(blk.col_attn.attn_weights, blk.row_attn.attn_weights)
)
return attention_weights
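# Hedged usage sketch (added for illustration; reuses the toy tensors from the class
# docstring example, so the shapes below follow the property docstring):
#   _ = model(X_tab)
#   col_attn, row_attn = model.attention_weights[0]
#   col_attn.shape  # (N, H, F, F) -> torch.Size([5, 8, 5, 5])
#   row_attn.shape  # (1, H, N, N) -> torch.Size([1, 8, 5, 5])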
|
the-stack_0_12253 | # -*- coding: utf-8 -*-
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Sphinx-Themes template'
copyright = '2018, sphinx-themes.org'
author = 'sphinx-themes.org'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# If you want to have a consistent, platform independent look
# sphinxemoji_style = 'twemoji'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"external_links": [
("Github", "https://github.com/romnnn/sphinx_press_theme")
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# The default `html_sidebars` of Press theme: ['util/searchbox.html', 'util/sidetoc.html']
#
# html_sidebars = {'**': ['util/sidetoc.html']}
html_logo = 'https://mirrors.creativecommons.org/presskit/icons/heart.black.png'
#---sphinx-themes-----
html_theme = 'press'
|
the-stack_0_12254 | import math
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
app = FastAPI()
app.mount("/assets", StaticFiles(directory="assets"), name="assets")
templates = Jinja2Templates(directory="templates")
@app.get("/{id}")
def e_to_the_x(id: str):
intValue = int(id)
return { "e to the power of x" : math.exp(intValue)}
@app.get("/html/{id}", response_class=HTMLResponse)
async def index(request: Request, id:str):
intValue = int(id)
message = "e to the power of x: " + str(math.exp(intValue))
return templates.TemplateResponse("index.html", {"request": request, "message" : message})
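# Hedged usage sketch (assumes this module is saved as main.py, served locally with
# uvicorn, and that templates/index.html exists):
#   $ uvicorn main:app --reload
#   $ curl http://127.0.0.1:8000/2        -> {"e to the power of x": 7.38905609893065}
#   $ curl http://127.0.0.1:8000/html/2   -> renders index.html with the same value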
|
the-stack_0_12255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=15
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma215.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
the-stack_0_12256 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_explosion2754_all_of
except ImportError:
bt_explosion2754_all_of = sys.modules[
"onshape_client.oas.models.bt_explosion2754_all_of"
]
try:
from onshape_client.oas.models import bt_explosion_step_feature3008
except ImportError:
bt_explosion_step_feature3008 = sys.modules[
"onshape_client.oas.models.bt_explosion_step_feature3008"
]
try:
from onshape_client.oas.models import bt_microversion_id_and_configuration2338
except ImportError:
bt_microversion_id_and_configuration2338 = sys.modules[
"onshape_client.oas.models.bt_microversion_id_and_configuration2338"
]
try:
from onshape_client.oas.models import btm_assembly_feature887
except ImportError:
btm_assembly_feature887 = sys.modules[
"onshape_client.oas.models.btm_assembly_feature887"
]
try:
from onshape_client.oas.models import btm_feature134
except ImportError:
btm_feature134 = sys.modules["onshape_client.oas.models.btm_feature134"]
try:
from onshape_client.oas.models import btm_individual_query_with_occurrence_base904
except ImportError:
btm_individual_query_with_occurrence_base904 = sys.modules[
"onshape_client.oas.models.btm_individual_query_with_occurrence_base904"
]
try:
from onshape_client.oas.models import btm_parameter1
except ImportError:
btm_parameter1 = sys.modules["onshape_client.oas.models.btm_parameter1"]
class BTExplosion2754(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"explode_steps": (
[bt_explosion_step_feature3008.BTExplosionStepFeature3008],
), # noqa: E501
"starting_position_id": (
bt_microversion_id_and_configuration2338.BTMicroversionIdAndConfiguration2338,
), # noqa: E501
"feature_id": (str,), # noqa: E501
"feature_type": (str,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"name": (str,), # noqa: E501
"namespace": (str,), # noqa: E501
"node_id": (str,), # noqa: E501
"parameters": ([btm_parameter1.BTMParameter1],), # noqa: E501
"return_after_subfeatures": (bool,), # noqa: E501
"sub_features": ([btm_feature134.BTMFeature134],), # noqa: E501
"suppressed": (bool,), # noqa: E501
"auxiliary_assembly_feature": (bool,), # noqa: E501
"feature_list_field_index": (int,), # noqa: E501
"occurrence_queries_from_all_configurations": (
[
btm_individual_query_with_occurrence_base904.BTMIndividualQueryWithOccurrenceBase904
],
), # noqa: E501
"version": (int,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"explode_steps": "explodeSteps", # noqa: E501
"starting_position_id": "startingPositionId", # noqa: E501
"feature_id": "featureId", # noqa: E501
"feature_type": "featureType", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"name": "name", # noqa: E501
"namespace": "namespace", # noqa: E501
"node_id": "nodeId", # noqa: E501
"parameters": "parameters", # noqa: E501
"return_after_subfeatures": "returnAfterSubfeatures", # noqa: E501
"sub_features": "subFeatures", # noqa: E501
"suppressed": "suppressed", # noqa: E501
"auxiliary_assembly_feature": "auxiliaryAssemblyFeature", # noqa: E501
"feature_list_field_index": "featureListFieldIndex", # noqa: E501
"occurrence_queries_from_all_configurations": "occurrenceQueriesFromAllConfigurations", # noqa: E501
"version": "version", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_explosion2754.BTExplosion2754 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
explode_steps ([bt_explosion_step_feature3008.BTExplosionStepFeature3008]): [optional] # noqa: E501
starting_position_id (bt_microversion_id_and_configuration2338.BTMicroversionIdAndConfiguration2338): [optional] # noqa: E501
feature_id (str): [optional] # noqa: E501
feature_type (str): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
parameters ([btm_parameter1.BTMParameter1]): [optional] # noqa: E501
return_after_subfeatures (bool): [optional] # noqa: E501
sub_features ([btm_feature134.BTMFeature134]): [optional] # noqa: E501
suppressed (bool): [optional] # noqa: E501
auxiliary_assembly_feature (bool): [optional] # noqa: E501
feature_list_field_index (int): [optional] # noqa: E501
occurrence_queries_from_all_configurations ([btm_individual_query_with_occurrence_base904.BTMIndividualQueryWithOccurrenceBase904]): [optional] # noqa: E501
version (int): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_explosion2754_all_of.BTExplosion2754AllOf,
btm_assembly_feature887.BTMAssemblyFeature887,
],
"oneOf": [],
}
|
the-stack_0_12257 | import re
from collections import Counter
def unique_words_counter(file_path):
with open(file_path, "r", encoding="utf-8") as file:
all_words = re.findall(r"[0-9a-zA-Z-']+", file.read())
all_words = [word.upper() for word in all_words]
print("Total Words: ", len(all_words))
words_counter = Counter()
for word in all_words:
words_counter[word] += 1
print("\nTop 20 words:")
for word in words_counter.most_common(20):
value, count = word
print(value, " : ", count)
if __name__ == "__main__":
unique_words_counter("data/shakespeare.txt")
|
the-stack_0_12258 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import (
Iterator,
List,
Optional,
Union,
)
from amundsen_rds.models import RDSModel
from amundsen_rds.models.table import TableFollower as RDSTableFollower
from amundsen_rds.models.user import User as RDSUser
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.follower_constants import FOLLOWER_OF_OBJECT_RELATION_TYPE, FOLLOWER_RELATION_TYPE
from databuilder.models.table_metadata import TableMetadata
from databuilder.models.table_serializable import TableSerializable
from databuilder.models.user import User
class Follower(GraphSerializable, TableSerializable):
LABELS_PERMITTED_TO_HAVE_FOLLOWER = ['Table', 'Dashboard']
def __init__(
self,
start_label: str,
start_key: str,
follower_emails: Union[List, str],
) -> None:
if start_label not in Follower.LABELS_PERMITTED_TO_HAVE_FOLLOWER:
raise Exception(f'followers for {start_label} are not supported')
self.start_label = start_label
self.start_key = start_key
if isinstance(follower_emails, str):
follower_emails = follower_emails.split(',')
self.follower_emails = [
email.strip().lower() for email in follower_emails
]
self._node_iter = self._create_node_iterator()
self._relation_iter = self._create_relation_iterator()
self._record_iter = self._create_record_iterator()
def __repr__(self) -> str:
return f'follower({self.start_label!r}, {self.start_key!r}, {self.follower_emails!r})'
def create_next_node(self) -> Optional[GraphNode]:
try:
return next(self._node_iter)
except StopIteration:
return None
def create_next_relation(self) -> Optional[GraphRelationship]:
try:
return next(self._relation_iter)
except StopIteration:
return None
def create_next_record(self) -> Union[RDSModel, None]:
try:
return next(self._record_iter)
except StopIteration:
return None
def _create_node_iterator(self) -> Iterator[GraphNode]:
for email in self.follower_emails:
if email:
yield GraphNode(key=User.get_user_model_key(email=email),
label=User.USER_NODE_LABEL,
attributes={
User.USER_NODE_EMAIL: email,
})
def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
for email in self.follower_emails:
if email:
yield GraphRelationship(
start_label=self.start_label,
start_key=self.start_key,
end_label=User.USER_NODE_LABEL,
end_key=User.get_user_model_key(email=email),
type=FOLLOWER_RELATION_TYPE,
reverse_type=FOLLOWER_OF_OBJECT_RELATION_TYPE,
attributes={})
def _create_record_iterator(self) -> Iterator[RDSModel]:
for email in self.follower_emails:
if email:
user_record = RDSUser(rk=User.get_user_model_key(email=email),
email=email)
yield user_record
if self.start_label == TableMetadata.TABLE_NODE_LABEL:
yield RDSTableFollower(
table_rk=self.start_key,
user_rk=User.get_user_model_key(email=email),
)
else:
raise Exception(
f'{self.start_label}<>follower relationship is not table serializable'
)
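# Hedged usage sketch (added for illustration; the table key and e-mail addresses are
# assumptions following Amundsen-style keys, not values from this module):
#   follower = Follower(
#       start_label='Table',
#       start_key='hive://gold.test_schema/test_table',
#       follower_emails='[email protected], [email protected]',
#   )
#   follower.create_next_node()      # one User node per e-mail
#   follower.create_next_relation()  # Table -> User edge typed FOLLOWER_RELATION_TYPE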
|
the-stack_0_12259 | #!/usr/bin/env python
# coding=utf-8
import asyncio
import aiohttp
from .config import HEADERS, REQUEST_TIMEOUT, REQUEST_DELAY
async def _get_page(url, sleep):
"""
获取并返回网页内容
"""
async with aiohttp.ClientSession() as session:
try:
await asyncio.sleep(sleep)
async with session.get(
url, headers=HEADERS, timeout=REQUEST_TIMEOUT
) as resp:
return await resp.text()
except:
return ""
def requests(url, sleep=REQUEST_DELAY):
"""
请求方法,用于获取网页内容
:param url: 请求链接
:param sleep: 延迟时间(秒)
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
html = loop.run_until_complete(asyncio.gather(_get_page(url, sleep)))
loop.close()
if html:
return "".join(html)
|
the-stack_0_12265 | # -*- coding: utf-8 -*-
__title__ = 'stimson-web-scraper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
__maintainer__ = "The Stimson Center"
__maintainer_email = "[email protected]"
VIDEOS_TAGS = ['iframe', 'embed', 'object', 'video']
VIDEO_PROVIDERS = ['youtube', 'youtu.be', 'twitch', 'vimeo', 'dailymotion', 'kewego']
class Video(object):
"""Video object
"""
def __init__(self):
# type of embed
# embed, object, iframe
self.embed_type = None
# video provider name
self.provider = None
# width
self.width = None
# height
self.height = None
# embed code
self.embed_code = None
# src
self.src = None
class VideoExtractor(object):
"""Extracts a list of video from Article top node
"""
def __init__(self, config, top_node):
self.config = config
self.parser = self.config.get_parser()
self.top_node = top_node
self.candidates = []
self.movies = []
def get_embed_code(self, node):
return "".join([
line.strip()
for line in self.parser.node_to_string(node).splitlines()])
def get_embed_type(self, node):
return self.parser.get_tag(node)
def get_width(self, node):
return self.parser.get_attribute(node, 'width')
def get_height(self, node):
return self.parser.get_attribute(node, 'height')
def get_src(self, node):
return self.parser.get_attribute(node, 'src')
# noinspection PyMethodMayBeStatic
def get_provider(self, src):
if src:
for provider in VIDEO_PROVIDERS:
if provider in src:
return provider
return None
def get_video(self, node):
"""Create a video object from a video embed
"""
video = Video()
video.embed_code = self.get_embed_code(node)
video.embed_type = self.get_embed_type(node)
video.width = self.get_width(node)
video.height = self.get_height(node)
video.src = self.get_src(node)
video.provider = self.get_provider(video.src)
return video
def get_iframe_tag(self, node):
return self.get_video(node)
# noinspection PyUnusedLocal,PyMethodMayBeStatic
def get_video_tag(self, node):
"""Extract html video tags
"""
return Video()
def get_embed_tag(self, node):
# embed node may have an object node as parent
# in this case we want to retrieve the object node
# instead of the embed
parent = self.parser.get_parent(node)
if parent is not None:
parent_tag = self.parser.get_tag(parent)
if parent_tag == 'object':
return self.get_object_tag(node)
return self.get_video(node)
def get_object_tag(self, node):
# test if object tag has en embed child
# in this case we want to remove the embed from
# the candidate list to avoid parsing it twice
child_embed_tag = self.parser.get_elements_by_tag(node, 'embed')
if child_embed_tag and child_embed_tag[0] in self.candidates:
self.candidates.remove(child_embed_tag[0])
# get the object source
        # if we don't have a src node, don't continue
src_node = self.parser.get_elements_by_tag(
node, tag="param", attr="name", value="movie")
if not src_node:
return None
src = self.parser.get_attribute(src_node[0], "value")
# check provider
provider = self.get_provider(src)
if not provider:
return None
video = self.get_video(node)
video.provider = provider
video.src = src
return video
def get_videos(self):
self.candidates = self.parser.get_elements_by_tags(
self.top_node, VIDEOS_TAGS)
# loop all candidates
# and check if src attribute belongs to a video provider
for candidate in self.candidates:
tag = self.parser.get_tag(candidate)
attr = "get_%s_tag" % tag
if hasattr(self, attr):
movie = getattr(self, attr)(candidate)
if movie is not None and movie.provider is not None:
self.movies.append(movie)
return list(self.movies)
# append movies list to article
# self.article.movies = list(self.movies)
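# Hedged usage sketch (added for illustration): `config` must expose get_parser() and
# `top_node` is the parsed article root built elsewhere in the library; both are
# assumptions here.
#   extractor = VideoExtractor(config, top_node)
#   for movie in extractor.get_videos():
#       print(movie.provider, movie.embed_type, movie.src, movie.width, movie.height)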
|
the-stack_0_12266 | from setuptools import setup, find_packages
from os import path
__version__ = "0.3.15"
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
dependencies = [line for line in f if line]
setup(
name="office365",
version=__version__,
description="A wrapper around O365 offering subclasses with additional utility methods.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/matthewgdv/office",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.8",
],
packages=find_packages(exclude=["tests*"]),
install_requires=dependencies,
setup_requires=['setuptools_scm'],
include_package_data=True,
author="Matt GdV",
author_email="[email protected]"
)
|
the-stack_0_12267 | # File: S (Python 2.4)
from SCColorScheme import SCColorScheme
from otp.otpbase import OTPLocalizer
class SCSettings:
def __init__(self, eventPrefix, whisperMode = 0, colorScheme = None, submenuOverlap = OTPLocalizer.SCOsubmenuOverlap, topLevelOverlap = None):
self.eventPrefix = eventPrefix
self.whisperMode = whisperMode
if colorScheme is None:
colorScheme = SCColorScheme()
self.colorScheme = colorScheme
self.submenuOverlap = submenuOverlap
self.topLevelOverlap = topLevelOverlap
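# Hedged usage sketch (added for illustration; the event prefix is an assumption):
#   settings = SCSettings('speedChat', whisperMode=1)
#   settings.colorScheme  # falls back to a default SCColorScheme() when none is given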
|
the-stack_0_12269 | import numpy as np
import pytest
import pandas as pd
from pandas.core.internals import BlockManager, SingleBlockManager
from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
class CustomBlock(NonConsolidatableMixIn, Block):
_holder = np.ndarray
def formatting_values(self):
return np.array(["Val: {}".format(i) for i in self.values])
def concat_same_type(self, to_concat, placement=None):
"""
Always concatenate disregarding self.ndim as the values are
always 1D in this custom Block
"""
values = np.concatenate([blk.values for blk in to_concat])
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1)
)
@pytest.fixture
def df():
df1 = pd.DataFrame({"a": [1, 2, 3]})
blocks = df1._data.blocks
values = np.arange(3, dtype="int64")
custom_block = CustomBlock(values, placement=slice(1, 2))
blocks = blocks + (custom_block,)
block_manager = BlockManager(blocks, [pd.Index(["a", "b"]), df1.index])
return pd.DataFrame(block_manager)
def test_custom_repr():
values = np.arange(3, dtype="int64")
# series
block = CustomBlock(values, placement=slice(0, 3))
s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3)))
assert repr(s) == "0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64"
# dataframe
block = CustomBlock(values, placement=slice(0, 1))
blk_mgr = BlockManager([block], [["col"], range(3)])
df = pd.DataFrame(blk_mgr)
assert repr(df) == " col\n0 Val: 0\n1 Val: 1\n2 Val: 2"
def test_concat_series():
# GH17728
values = np.arange(3, dtype="int64")
block = CustomBlock(values, placement=slice(0, 3))
s = pd.Series(block, pd.RangeIndex(3), fastpath=True)
res = pd.concat([s, s])
assert isinstance(res._data.blocks[0], CustomBlock)
def test_concat_dataframe(df):
# GH17728
res = pd.concat([df, df])
assert isinstance(res._data.blocks[1], CustomBlock)
def test_concat_axis1(df):
# GH17954
df2 = pd.DataFrame({"c": [0.1, 0.2, 0.3]})
res = pd.concat([df, df2], axis=1)
assert isinstance(res._data.blocks[1], CustomBlock)
|
the-stack_0_12272 | from typing import Optional
import pytest
from odmantic.field import Field
from odmantic.model import EmbeddedModel, Model
from odmantic.reference import Reference
def test_field_defined_as_primary_key_and_custom_name():
with pytest.raises(
ValueError, match="cannot specify a primary field with a custom key_name"
):
Field(primary_field=True, key_name="not _id")
def test_field_defined_as_primary_key_default_name():
f = Field(primary_field=True)
assert f.key_name == "_id"
def test_field_define_key_as__id_without_setting_as_primary():
with pytest.raises(
ValueError,
match="cannot specify key_name='_id' without defining the field as primary",
):
Field(key_name="_id")
def test_pos_key_name():
class M(Model):
field: int = Field(key_name="alternate_name")
assert +M.field == "alternate_name"
assert ++M.field == "$alternate_name"
def test_unknown_attr_embedded_model():
class E(EmbeddedModel):
...
class M(Model):
field: E
with pytest.raises(AttributeError, match="attribute unknown_attr not found in E"):
M.field.unknown_attr # type: ignore
@pytest.mark.parametrize("operator_name", ("lt", "lte", "gt", "gte", "match"))
def test_reference_field_operator_not_allowed(operator_name: str):
class E(Model):
...
class M(Model):
field: E = Reference()
with pytest.raises(
AttributeError,
match=f"operator {operator_name} not allowed for ODMReference fields",
):
getattr(M.field, operator_name)
def test_field_required_in_doc_without_default():
class M(Model):
field: str
assert M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_with_default():
class M(Model):
field: str = Field("hi")
assert not M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_implicit_optional_default():
class M(Model):
field: Optional[str]
assert not M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_default_factory_disabled():
class M(Model):
field: str = Field(default_factory=lambda: "hi")
assert M.__odm_fields__["field"].is_required_in_doc()
def test_field_required_in_doc_default_factory_enabled():
class M(Model):
field: str = Field(default_factory=lambda: "hi")
class Config:
parse_doc_with_default_factories = True
assert not M.__odm_fields__["field"].is_required_in_doc()
|