ext | sha | content
---|---|---|
py | b40a095fcea33bbfa841197cd6e250f86bd518db | # bot.py
import os
import sys
import traceback
from random import uniform, random, choice, sample
import discord
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix='$', help_command=None, intents=intents)
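# Illustrative extra command (an addition, not part of the original file): with
# command_prefix='$' it is invoked in chat as "$roll" and replies with a number 1-6.
@bot.command(name='roll')
async def roll(ctx):
    await ctx.send(f'You rolled a {choice(range(1, 7))}')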
@bot.event
async def on_ready():
    # register the handler on the Bot that is actually run; the original created a
    # second, unused discord.Client() whose events would never fire
    print(f'{bot.user} has connected to Discord!')
bot.run(TOKEN)
|
py | b40a0984d4be741133d05b263a216a2a2ccf43ad | from django.shortcuts import render
def index(request):
return render(request, './index.html')
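# Illustrative wiring (an added sketch, not part of the original file): a minimal
# URLconf entry that would serve this view at the site root.
#
#     # urls.py (assumed module layout)
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [path('', views.index, name='index')]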
|
py | b40a099e49904eb6801d113d646456d72c985ba1 | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Mapping, Tuple
if TYPE_CHECKING:
from typing import Type
from cattr import GenConverter
KerningPair = Tuple[str, str]
class Kerning(Dict[KerningPair, float]):
def as_nested_dicts(self) -> dict[str, dict[str, float]]:
result: dict[str, dict[str, float]] = {}
for (left, right), value in self.items():
result.setdefault(left, {})[right] = value
return result
@classmethod
def from_nested_dicts(cls, kerning: Mapping[str, Mapping[str, float]]) -> Kerning:
return cls(
((left, right), kerning[left][right])
for left in kerning
for right in kerning[left]
)
def _unstructure(self, converter: GenConverter) -> dict[str, dict[str, float]]:
del converter # unused
return self.as_nested_dicts()
@staticmethod
def _structure(
data: Mapping[str, Mapping[str, float]],
cls: Type[Kerning],
converter: GenConverter,
) -> Kerning:
del converter # unused
return cls.from_nested_dicts(data)
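# Illustrative usage (an added sketch, not part of the original module): round-trip
# a small nested kerning dict through the helpers above.
if __name__ == "__main__":
    nested = {"A": {"V": -40.0}, "T": {"o": -60.0}}
    kerning = Kerning.from_nested_dicts(nested)
    assert kerning[("A", "V")] == -40.0
    assert kerning.as_nested_dicts() == nested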
|
py | b40a09d95d898536c11489cfd41d9fdcb018b5e9 | import os
import json
import argparse
import itertools
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
import commons
import utils
from data_utils import (
TextAudioLoader,
TextAudioCollate,
DistributedBucketSampler
)
from models import (
SynthesizerTrn,
MultiPeriodDiscriminator,
)
from losses import (
generator_loss,
discriminator_loss,
feature_loss,
kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
torch.backends.cudnn.benchmark = True
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '80000'
hps = utils.get_hparams()
mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
def run(rank, n_gpus, hps):
print("rank :")
print(rank)
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
print(hps.data.training_files)
print(hps.data)
train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32,300,400,500,600,700,800,900,1000],
#[32,500,700,900],
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextAudioCollate()
train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
collate_fn=collate_fn, batch_sampler=train_sampler)
if rank == 0:
eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data)
eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=False, collate_fn=collate_fn)
net_g = SynthesizerTrn(
len(symbols),
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
**hps.model).cuda(rank)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
optim_g = torch.optim.AdamW(
net_g.parameters(),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
optim_d = torch.optim.AdamW(
net_d.parameters(),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
net_g = DDP(net_g, device_ids=[rank])
net_d = DDP(net_d, device_ids=[rank])
try:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
global_step = (epoch_str - 1) * len(train_loader)
except Exception:  # no checkpoint found; start training from scratch
epoch_str = 1
global_step = 0
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
scaler = GradScaler(enabled=hps.train.fp16_run)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank==0:
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
else:
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
scheduler_g.step()
scheduler_d.step()
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
net_g, net_d = nets
optim_g, optim_d = optims
scheduler_g, scheduler_d = schedulers
train_loader, eval_loader = loaders
if writers is not None:
writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch)
global global_step
net_g.train()
net_d.train()
for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
with autocast(enabled=hps.train.fp16_run):
y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
(z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths)
mel = spec_to_mel_torch(
spec,
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.mel_fmin,
hps.data.mel_fmax)
y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
y_hat_mel = mel_spectrogram_torch(
y_hat.squeeze(1),
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.hop_length,
hps.data.win_length,
hps.data.mel_fmin,
hps.data.mel_fmax
)
y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
# Discriminator
y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
with autocast(enabled=False):
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
loss_disc_all = loss_disc
optim_d.zero_grad()
scaler.scale(loss_disc_all).backward()
scaler.unscale_(optim_d)
grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
scaler.step(optim_d)
with autocast(enabled=hps.train.fp16_run):
# Generator
y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
with autocast(enabled=False):
loss_dur = torch.sum(l_length.float())
loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
loss_fm = feature_loss(fmap_r, fmap_g)
loss_gen, losses_gen = generator_loss(y_d_hat_g)
loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
optim_g.zero_grad()
scaler.scale(loss_gen_all).backward()
scaler.unscale_(optim_g)
grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
scaler.step(optim_g)
scaler.update()
if rank==0:
if global_step % hps.train.log_interval == 0:
lr = optim_g.param_groups[0]['lr']
losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
logger.info('Train Epoch: {} [{:.0f}%]'.format(
epoch,
100. * batch_idx / len(train_loader)))
logger.info([x.item() for x in losses] + [global_step, lr])
scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
image_dict = {
"slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
"slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
"all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
"all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
}
utils.summarize(
writer=writer,
global_step=global_step,
images=image_dict,
scalars=scalar_dict)
if global_step % hps.train.eval_interval == 0:
evaluate(hps, net_g, eval_loader, writer_eval)
utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
global_step += 1
if rank == 0:
logger.info('====> Epoch: {}'.format(epoch))
def evaluate(hps, generator, eval_loader, writer_eval):
generator.eval()
with torch.no_grad():
for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader):
x, x_lengths = x.cuda(0), x_lengths.cuda(0)
spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
y, y_lengths = y.cuda(0), y_lengths.cuda(0)
# remove else
x = x[:1]
x_lengths = x_lengths[:1]
spec = spec[:1]
spec_lengths = spec_lengths[:1]
y = y[:1]
y_lengths = y_lengths[:1]
break
y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000)
y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
mel = spec_to_mel_torch(
spec,
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.mel_fmin,
hps.data.mel_fmax)
y_hat_mel = mel_spectrogram_torch(
y_hat.squeeze(1).float(),
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.hop_length,
hps.data.win_length,
hps.data.mel_fmin,
hps.data.mel_fmax
)
image_dict = {
"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
}
audio_dict = {
"gen/audio": y_hat[0,:,:y_hat_lengths[0]]
}
if global_step == 0:
image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
utils.summarize(
writer=writer_eval,
global_step=global_step,
images=image_dict,
audios=audio_dict,
audio_sampling_rate=hps.data.sampling_rate
)
generator.train()
if __name__ == "__main__":
main()
|
py | b40a0a89e9fb0d8375bd86a3e8ed685fc0ec34ee | import os
import os.path as op
from flask import Flask
from flask.ext import superadmin
from flask.ext.superadmin.contrib import fileadmin
# Create flask app
app = Flask(__name__, template_folder='templates', static_folder='files')
# Create a dummy secret key so we can use flash
app.config['SECRET_KEY'] = '123456790'
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
if __name__ == '__main__':
# Create directory
path = op.join(op.dirname(__file__), 'files')
try:
os.mkdir(path)
except OSError:
pass
# Create admin interface
admin = superadmin.Admin(app)
admin.add_view(fileadmin.FileAdmin(path, '/files/', name='Files'))
# Start app
app.debug = True
app.run()
|
py | b40a0ad70690acaf406033db1d09d351fb006d52 | import logging
class Publisher:
def __init__(self):
self.subscribers = set()  # per-instance, so separate publishers do not share subscribers
def subscribe(self, subscriber):
self.subscribers.add(subscriber)
def notify_subscribers(self, added_name):
for a_subscriber in self.subscribers:
a_subscriber.update(added_name)
class Subscriber:
def update(self, added_name):
pass
class LoggingUtility(Subscriber):
def update(self, added_name):
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)s %(asctime)s - %(message)s")
# basicConfig already attaches a StreamHandler; adding another one here made
# every message print twice and accumulate handlers on each call
logging.getLogger(__name__).info(f"Name '{added_name}' added!")
class InsertNames(Publisher):
def __init__(self):
super().__init__()
self.__name_set = set()
def insert(self):
insert_name = True
while insert_name:
a_name = input("Enter a name: ").strip().title()
self.__name_set.add(a_name)
self.notify_subscribers(a_name)
insert_name = input(
"Do you wanna input another name? (y/n) ").strip().startswith("y")
def main():
insert_names = InsertNames()
insert_names.subscribe(LoggingUtility())
insert_names.insert()
if __name__ == "__main__":
main() |
py | b40a0b8dacb1d7baf8c4dfe1e6e23f717a1f1a90 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import declarative
from ..utilities.future_from_2 import super
from . import bases
from . import multi_unit_args as mua
from . import units
def generate_F_Hz():
@declarative.group_dproperty
def F_Hz(desc):
return mua.generate_refval_attribute(
desc,
ubunch = units.frequency,
stems = ['F', ],
ctree_name = 'frequency',
preferred_attr = 'frequency',
default_attr = 'frequency',
prototypes = ['full'],
)
return F_Hz
class Frequency(
bases.FrequencyBase,
bases.SystemElementBase
):
F_Hz = generate_F_Hz()
@declarative.dproperty
def F_center_Hz(self, val = declarative.NOARG):
if val is declarative.NOARG:
val = self.F_Hz.ref
val = self.ctree.setdefault('F_center_Hz', val)
return val
def __init__(
self,
F_width_Hz = 0,
order = None,
**kwargs
):
super().__init__(**kwargs)
bases.PTREE_ASSIGN(self).F_width_Hz = F_width_Hz
bases.PTREE_ASSIGN(self).order = order
return
|
py | b40a0bd5ef8433647984ecdddbf4cd0a5b078050 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
list_len = 0
if head:
node = head.next
else:
return True
while node:
list_len += 1
node = node.next
stack = list()
i = 0
node = head
while node:
if i == list_len / 2:
node = node.next
i += 1
elif list_len == 0 or i < list_len / 2:
stack.append(node.val)
node = node.next
i += 1
else:
if node.val == stack.pop():
node = node.next
i += 1
else:
return False
return True
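# Illustrative usage (an added sketch, assuming ListNode is defined as in the
# commented stub above):
#
#     head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(1)
#     Solution().isPalindrome(head)   # -> True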
|
py | b40a0c1e2492cad31de00d9766694c0273c0087f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('minecraft', '0003_auto_20141117_0226'),
]
operations = [
migrations.CreateModel(
name='LogAction',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('action', models.CharField(max_length=500)),
('arguments', models.TextField(blank=True)),
('webop', models.ForeignKey(to='minecraft.WebOperator', related_name='log')),
],
options={
'verbose_name': 'Log Action',
'verbose_name_plural': 'Log Actions',
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
migrations.AddField(
model_name='weboperator',
name='gamemode',
field=models.IntegerField(default=0, choices=[(0, 'Survival'), (1, 'Creative'), (2, 'Adventure'), (3, 'Spectator')]),
preserve_default=True,
),
]
|
py | b40a0c52dad32afb7e6a3a21d51dcadb8f0708d2 | #!/usr/bin/env python3
import torch
from ..._utils.approximation_methods import approximation_parameters
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.batching import _batched_operator
from ..._utils.common import (
_reshape_and_sum,
_format_input_baseline,
_format_additional_forward_args,
_validate_input,
_format_attributions,
_expand_additional_forward_args,
_expand_target,
_verify_select_column,
)
from ..._utils.gradient import compute_layer_gradients_and_eval
class NeuronConductance(NeuronAttribution, GradientAttribution):
def __init__(self, forward_func, layer, device_ids=None):
r"""
Args:
forward_func (callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron in the input or output
of this layer are computed using the argument neuron_index
in the attribute method.
Currently, only layers with a single tensor input or output
are supported.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
def attribute(
self,
inputs,
neuron_index,
baselines=None,
target=None,
additional_forward_args=None,
n_steps=50,
method="riemann_trapezoid",
internal_batch_size=None,
attribute_to_neuron_input=False,
):
r"""
Computes conductance with respect to particular hidden neuron. The
returned output is in the shape of the input, showing the attribution
/ conductance of each input feature to the selected hidden layer neuron.
The details of the approach can be found here:
https://arxiv.org/abs/1805.12233
Args:
inputs (tensor or tuple of tensors): Input for which neuron
conductance is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_index (int or tuple): Index of neuron in output of given
layer for which attribution is desired. Length of
this tuple must be one less than the number of
dimensions in the output of the given layer (since
dimension 0 corresponds to number of examples).
An integer may be provided instead of a tuple of
length 1.
baselines (scalar, tensor, tuple of scalars or tensors, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (string, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `riemann_trapezoid` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size internal_batch_size,
which are computed (forward / backward passes)
sequentially.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*tensor* or tuple of *tensors* of **attributions**:
- **attributions** (*tensor* or tuple of *tensors*):
Conductance for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_cond = NeuronConductance(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # Computes neuron conductance for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_cond.attribute(input, (4,1,2))
"""
is_inputs_tuple = isinstance(inputs, tuple)
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
num_examples = inputs[0].shape[0]
total_batch = num_examples * n_steps
# Retrieve scaling factors for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# Conductance Gradients - Returns gradient of output with respect to
# hidden layer and hidden layer evaluated at each input.
layer_gradients, layer_eval, input_grads, _ = _batched_operator(
compute_layer_gradients_and_eval,
scaled_features_tpl,
input_additional_args,
internal_batch_size=internal_batch_size,
forward_fn=self.forward_func,
layer=self.layer,
target_ind=expanded_target,
gradient_neuron_index=neuron_index,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
# Layer gradients and eval
assert (
len(layer_gradients) == 1 and len(layer_eval) == 1
), "Layer can only have 1 output tensor for neuron attribution!"
layer_gradients = layer_gradients[0]
layer_eval = layer_eval[0]
# Multiplies by appropriate gradient of output with respect to hidden neurons
# mid_grads is a 1D Tensor of length num_steps*internal_batch_size,
# containing mid layer gradient for each input step.
mid_grads = _verify_select_column(layer_gradients, neuron_index)
scaled_input_gradients = tuple(
input_grad
* mid_grads.reshape((total_batch,) + (1,) * (len(input_grad.shape) - 1))
for input_grad in input_grads
)
# Multiplies by appropriate step size.
scaled_grads = tuple(
scaled_input_gradient.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(scaled_input_gradient.device)
for scaled_input_gradient in scaled_input_gradients
)
# Aggregates across all steps for each tensor in the input tuple
total_grads = tuple(
_reshape_and_sum(scaled_grad, n_steps, num_examples, input_grad.shape[1:])
for (scaled_grad, input_grad) in zip(scaled_grads, input_grads)
)
# computes attribution for each tensor in input tuple
# attributions has the same dimensionality as inputs
attributions = tuple(
total_grad * (input - baseline)
for total_grad, input, baseline in zip(total_grads, inputs, baselines)
)
return _format_attributions(is_inputs_tuple, attributions)
|
py | b40a0cb47c8dbbce6d042d5405a64a9eac1353b1 | class Node:
def __init__(self, val_, next_ = None, random_ = None):
self.val = val_
self.next = next_
self.random = random_
class Solution:
def copyRandomList(self, head):
# check if head is none
if head == None:
return None
# make a dictionary of nodes
dict_nodes = {}
# keep a temp node
temp = head
# traverse and add nodes to the dictionary
while temp:
dict_nodes[temp] = Node(temp.val)
temp = temp.next
# set temp to head again
temp = head
while temp:
# point the copies at other copies, not back at the original nodes
if temp.next != None:
dict_nodes[temp].next = dict_nodes[temp.next]
if temp.random != None:
dict_nodes[temp].random = dict_nodes[temp.random]
temp = temp.next
# return copy of head
return dict_nodes[head]
def main():
mySol = Solution()
root = Node(7)
root.next = Node(13, None, root)
root.next.next = Node(11)
root.next.next.next = Node(10, None, root.next.next)
root.next.next.next.next = Node(55, None, root.next)
newRoot = mySol.copyRandomList(root)
if newRoot == root:
cpy = "Not Copied"
else:
cpy = "Copied"
print("Was the list copied? ")
print(cpy)
if __name__ == "__main__":
main() |
py | b40a0da875b10880b49d5815ed5e957b54f0ecf8 | # -*- encoding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import UserError, ValidationError
import datetime
import logging
class AccountInvoice(models.Model):
_inherit = "account.invoice"
tipo_gasto = fields.Selection([('compra', 'Compra/Bien'), ('servicio', 'Servicio'), ('importacion', 'Importación/Exportación'), ('combustible', 'Combustible'), ('mixto', 'Mixto')], string="Tipo de Gasto", default="compra")
numero_viejo = fields.Char(string="Numero Viejo")
serie_rango = fields.Char(string="Serie Rango")
inicial_rango = fields.Integer(string="Inicial Rango")
final_rango = fields.Integer(string="Final Rango")
diario_facturas_por_rangos = fields.Boolean(string='Las facturas se ingresan por rango', help='Cada factura realmente es un rango de factura y el rango se ingresa en Referencia/Descripción', related="journal_id.facturas_por_rangos")
nota_debito = fields.Boolean(string='Nota de debito')
def suma_impuesto(self,impuestos_ids):
suma_monto = 0
for impuesto in impuestos_ids:
suma_monto += impuesto.amount
return suma_monto
def impuesto_global(self):
impuestos = self.env['l10n_gt_extra.impuestos'].search([['active','=',True],['tipo','=','compra']])
impuestos_valores = []
diferencia = 0
suma_impuesto = 0
impuesto_total = 0
rango_final_anterior = 0
for rango in impuestos.rangos_ids:
if self.amount_untaxed > rango.rango_final and diferencia == 0:
diferencia = self.amount_untaxed - rango.rango_final
impuesto_individual = rango.rango_final * (self.suma_impuesto(rango.impuestos_ids) / 100)
suma_impuesto += impuesto_individual
impuestos_valores.append({'nombre': rango.impuestos_ids[0].name,'impuesto_id': rango.impuestos_ids[0].id,'account_id': rango.impuestos_ids[0].account_id.id,'total': impuesto_individual})
elif self.amount_untaxed <= rango.rango_final and diferencia == 0 and rango_final_anterior == 0:
impuesto_individual = self.amount_untaxed * (self.suma_impuesto(rango.impuestos_ids) / 100)
suma_impuesto += impuesto_individual
rango_final_anterior = rango.rango_final
impuestos_valores.append({'nombre': rango.impuestos_ids[0].name,'impuesto_id': rango.impuestos_ids[0].id,'account_id': rango.impuestos_ids[0].account_id.id,'total': impuesto_individual})
elif diferencia > 0:
impuesto_individual = diferencia * (self.suma_impuesto(rango.impuestos_ids) / 100)
suma_impuesto += impuesto_individual
impuestos_valores.append({'nombre': rango.impuestos_ids[0].name,'impuesto_id': rango.impuestos_ids[0].id,'account_id': rango.impuestos_ids[0].account_id.id,'total': impuesto_individual})
impuesto_total = 0
self.update({'amount_tax': suma_impuesto,'amount_total': impuesto_total + self.amount_untaxed})
account_invoice_tax = self.env['account.invoice.tax']
for impuesto in impuestos_valores:
account_invoice_tax.create({'invoice_id': self.id,'tax_id':impuesto['impuesto_id'],'name': impuesto['nombre'],'account_id': impuesto['account_id'],'amount':impuesto['total'] })
return True
@api.constrains('reference')
def _validar_factura_proveedor(self):
if self.reference:
facturas = self.search([('reference','=',self.reference), ('partner_id','=',self.partner_id.id), ('type','=','in_invoice')])
if len(facturas) > 1:
raise ValidationError("Ya existe una factura con ese mismo numero.")
@api.constrains('inicial_rango', 'final_rango')
def _validar_rango(self):
if self.diario_facturas_por_rangos:
if int(self.final_rango) < int(self.inicial_rango):
raise ValidationError('El número inicial del rango es mayor que el final.')
cruzados = self.search([('serie_rango','=',self.serie_rango), ('inicial_rango','<=',self.inicial_rango), ('final_rango','>=',self.inicial_rango)])
if len(cruzados) > 1:
raise ValidationError('Ya existe otra factura con esta serie y en el mismo rango')
cruzados = self.search([('serie_rango','=',self.serie_rango), ('inicial_rango','<=',self.final_rango), ('final_rango','>=',self.final_rango)])
if len(cruzados) > 1:
raise ValidationError('Ya existe otra factura con esta serie y en el mismo rango')
cruzados = self.search([('serie_rango','=',self.serie_rango), ('inicial_rango','>=',self.inicial_rango), ('inicial_rango','<=',self.final_rango)])
if len(cruzados) > 1:
raise ValidationError('Ya existe otra factura con esta serie y en el mismo rango')
self.name = "{}-{} al {}-{}".format(self.serie_rango, self.inicial_rango, self.serie_rango, self.final_rango)
def action_cancel(self):
for rec in self:
rec.numero_viejo = rec.number
return super(AccountInvoice, self).action_cancel()
class AccountPayment(models.Model):
_inherit = "account.payment"
descripcion = fields.Char(string="Descripción")
numero_viejo = fields.Char(string="Numero Viejo")
nombre_impreso = fields.Char(string="Nombre Impreso")
no_negociable = fields.Boolean(string="No Negociable", default=True)
anulado = fields.Boolean('Anulado')
fecha_anulacion = fields.Date('Fecha anulación')
def cancel(self):
for rec in self:
rec.write({'numero_viejo': rec.name})
return super(AccountPayment, self).cancel()
def anular(self):
for rec in self:
for move in rec.move_line_ids.mapped('move_id'):
move.button_cancel()
rec.move_line_ids.remove_move_reconcile()
rec.move_line_ids.write({ 'debit': 0, 'credit': 0, 'amount_currency': 0 })
for move in rec.move_line_ids.mapped('move_id'):
move.post()
rec.anulado = True
rec.fecha_anulacion = datetime.datetime.strftime(datetime.datetime.now(),'%Y-%m-%d')
class AccountJournal(models.Model):
_inherit = "account.journal"
direccion = fields.Many2one('res.partner', string='Dirección')
codigo_establecimiento = fields.Integer(string='Código de establecimiento')
facturas_por_rangos = fields.Boolean(string='Las facturas se ingresan por rango', help='Cada factura realmente es un rango de factura y el rango se ingresa en Referencia/Descripción')
usar_referencia = fields.Boolean(string='Usar referencia para libro de ventas', help='El número de la factua se ingresa en Referencia/Descripción')
|
py | b40a0e01046827e79ffb61d14a45caa0c7a114e9 | def invert_dict(d):
return {v: k for k, v in d.items()}
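# Illustrative usage (an added example, not part of the original snippet): values
# must be hashable, and keys that share a value collapse to the last one seen.
if __name__ == "__main__":
    assert invert_dict({"a": 1, "b": 2}) == {1: "a", 2: "b"}
    assert invert_dict({"a": 1, "b": 1}) == {1: "b"}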
|
py | b40a0e4b4ad2e0cfac624bdb6c0ff24a45cc24d6 | import Plugins.Plugin
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE
import os, gettext, hashlib
__version__ = "1.7.5"
PluginLanguageDomain = "WebInterface"
PluginLanguagePath = "Extensions/WebInterface/locale"
def localeInit():
lang = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
os.environ["LANGUAGE"] = lang # Enigma doesn't set this (or LC_ALL, LC_MESSAGES, LANG). gettext needs it!
print "[WebInterface] set language to ", lang
gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
t = gettext.dgettext(PluginLanguageDomain, txt)
if t == txt:
print "[WebInterface] fallback to default translation for", txt
t = gettext.gettext(txt)
return t
def bin2long(s):
return reduce( lambda x,y:(x<<8L)+y, map(ord, s))
def long2bin(l):
res = ""
for byte in range(128):
res += chr((l >> (1024 - (byte + 1) * 8)) & 0xff)
return res
def rsa_pub1024(src, mod):
return long2bin(pow(bin2long(src), 65537, bin2long(mod)))
def decrypt_block(src, mod):
if len(src) != 128 and len(src) != 202:
return None
dest = rsa_pub1024(src[:128], mod)
hash = hashlib.sha1(dest[1:107])
if len(src) == 202:
hash.update(src[131:192])
result = hash.digest()
if result == dest[107:127]:
return dest
return None
localeInit()
language.addCallback(localeInit)
|
py | b40a0e5e9140d2cc284699bd51941471b419e984 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition.pca import PCA, RandomizedPCA
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("pca", pca), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different pca object to control the random_state stream
fs = FeatureUnion([("pca", pca), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_feature_names():
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
|
py | b40a0eb324955f1009a77fc1a284ada303ea6c3c | # coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee [email protected] #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# Import all members of the sys module
from sys import *
# With the member-import syntax, the imported members are accessed directly by name
print(argv[0])
print(winver)
|
py | b40a0fefdcea6d99325438356e535fa981829051 | import unittest, inspect, json
from .context import WDL
class TestEval(unittest.TestCase):
def test_expr_render(self):
# types
self.assertEqual(str(WDL.parse_expr("false")), "false")
self.assertEqual(str(WDL.parse_expr("1")), "1")
self.assertEqual(str(WDL.parse_expr("1.1")), "1.1")
self.assertEqual(str(WDL.parse_expr('"Some text with a ~{placeholder}"')), '"Some text with a ~{placeholder}"')
self.assertEqual(str(WDL.parse_expr('["An", "Array"]')), '["An", "Array"]')
self.assertEqual(str(WDL.parse_expr('{"A": "Map"}')), '{"A": "Map"}')
self.assertEqual(str(WDL.parse_expr('("A", "Pair")')), '("A", "Pair")')
self.assertEqual(str(WDL.parse_expr('object {"A": "struct"}', "1.0")), '{"A": "struct"}')
# logic
self.assertEqual(str(WDL.parse_expr("true && false")), "true && false")
self.assertEqual(str(WDL.parse_expr("true || false")), "true || false")
self.assertEqual(str(WDL.parse_expr("true && false || true")), "true && false || true")
self.assertEqual(str(WDL.parse_expr("!true")), "!true")
self.assertEqual(str(WDL.parse_expr("if true then 1 else 2")), "if true then 1 else 2")
# comparisons
self.assertEqual(str(WDL.parse_expr("1 == 2")), "1 == 2")
self.assertEqual(str(WDL.parse_expr("1 != 2")), "1 != 2")
self.assertEqual(str(WDL.parse_expr("1 >= 2")), "1 >= 2")
self.assertEqual(str(WDL.parse_expr("1 <= 2")), "1 <= 2")
self.assertEqual(str(WDL.parse_expr("1 > 2")), "1 > 2")
self.assertEqual(str(WDL.parse_expr("1 < 2")), "1 < 2")
# arithmetics
self.assertEqual(str(WDL.parse_expr("1+1")), "1 + 1")
self.assertEqual(str(WDL.parse_expr("1-1")), "1 - 1")
self.assertEqual(str(WDL.parse_expr("1/1")), "1 / 1")
self.assertEqual(str(WDL.parse_expr("1*1")), "1 * 1")
self.assertEqual(str(WDL.parse_expr("1%1")), "1 % 1")
self.assertEqual(str(WDL.parse_expr("1*1")), "1 * 1")
# functions
self.assertEqual(str(WDL.parse_expr("defined(value)")), "defined(value)")
self.assertEqual(str(WDL.parse_expr("select_first([1, 2])")), "select_first([1, 2])")
# access
self.assertEqual(str(WDL.parse_expr("[1,2][1]")), "[1, 2][1]")
self.assertEqual(str(WDL.parse_expr("{1:2}[1]")), "{1: 2}[1]")
self.assertEqual(str(WDL.parse_expr("a.b")), "a.b")
# if-then-else
self.assertEqual(str(WDL.parse_expr("if false then 1+1 else 2+2")), "if false then 1 + 1 else 2 + 2")
# combinations
combo = "[treu][1] || defined(var) && !(8 == 1 * (12 + if false then 2 else 3) / 6)"
self.assertEqual(str(WDL.parse_expr(combo)), combo)
combo2 = "(if true then 1 else 2 * 8) % a.b - 16"
self.assertEqual(str(WDL.parse_expr(combo2)), combo2)
combo3 = "defined(if true then hey else hello)"
self.assertEqual(str(WDL.parse_expr(combo3)), combo3)
ifthenelsechain = "!if true then if false then true else false else true"
self.assertEqual(str(WDL.parse_expr(ifthenelsechain)), ifthenelsechain)
def test_boolean(self):
expr = WDL.parse_expr("true")
expr.infer_type([])
self.assertIsInstance(expr.type, WDL.Type.Boolean)
self.assertEqual(str(expr.type), "Boolean")
val = expr.eval([])
self.assertIsInstance(val, WDL.Value.Boolean)
self.assertEqual(str(val.type), "Boolean")
self.assertEqual(val.value, True)
self.assertEqual(str(val), "true")
self.assertEqual(val, WDL.Value.Boolean(True))
self.assertNotEqual(val, WDL.Value.Boolean(False))
expr = WDL.parse_expr("false")
expr.infer_type([])
self.assertEqual(str(expr.type), "Boolean")
val = expr.eval([])
self.assertEqual(str(val.type), "Boolean")
self.assertEqual(val.value, False)
self.assertEqual(str(val), "false")
self.assertEqual(val, WDL.Value.Boolean(False))
self.assertNotEqual(val, WDL.Value.Boolean(True))
def _test_tuples(self, *tuples):
for tuple in tuples:
assert(len(tuple) >= 2)
expr = tuple[0]
expected = tuple[1]
env = None
expected_type = None
exn = None
version = None
for x in tuple[2:]:
if isinstance(x, WDL.Env.Bindings):
env = x
elif isinstance(x, WDL.Type.Base):
expected_type = x
elif isinstance(x, str):
version = x
elif inspect.isclass(x):
exn = x
else:
assert False
type_env = WDL.Env.Bindings()
if env is not None:
for binding in env:
type_env = type_env.bind(binding.name, binding.value.type)
if exn:
with self.assertRaises(exn, msg=expected):
x = WDL.parse_expr(expr, version=version).infer_type(type_env).eval(env)
else:
v = WDL.parse_expr(expr, version=version).infer_type(type_env).eval(env).expect(expected_type)
self.assertEqual(str(v), expected, str(expr))
def test_logic(self):
self._test_tuples(
("true && true", "true", WDL.Type.Boolean()),
("true && false", "false"),
("false && true", "false"),
("false && false", "false"),
("true || true", "true"),
("true || false", "true"),
("false || true", "true"),
("false || false", "false"),
("false && true || true && true", "true"),
("true && !false || false", "true"),
("!true", "false"),
("!!true", "true"),
("!false", "true"),
("!false && true", "true"),
("!false && false", "false"),
("!true && false", "false"),
("!(false && false)", "true"),
("!(false && true)", "true"),
)
def test_arithmetic(self):
self._test_tuples(
("1", "1"),
("-1","-1"),
("0-1","-1"),
("1+1", "2"),
("2*3+4","10"),
("2*(3+4)","14"),
("2+3*4","14"),
("1+6/3*4","9"),
("1-4/3","0"),
("1--4/3","3"), # -4/3 == -2, is this right?
("4%2","0"),
("4%3","1"),
("1 + false", "(Ln 1, Col 1) Non-numeric operand to + operator", WDL.Error.IncompatibleOperand)
)
def test_cmp(self):
self._test_tuples(
("1 == 1","true"),
("1 == 0","false"),
("1 != 1","false"),
("1 != 0","true"),
("1 < 1","false"),
("1 <= 1","true"),
("1<2","true"),
("1<=2","true"),
("1>1","false"),
("1>=1","true"),
("1>2","false"),
("1>=0","true"),
("3<2 || 1>=0","true"),
("3<2&&1>=0","false"),
("3<2&&1>=0||1==1","true"),
("1 == false", "(Ln 1, Col 1) Cannot compare Int and Boolean", WDL.Error.IncompatibleOperand)
)
def test_str(self):
self._test_tuples(
('"true"', '"true"', WDL.Type.String()),
('"true" == "true"', 'true', WDL.Type.Boolean()),
('"true" != "true"', 'false', WDL.Type.Boolean()),
('"true" == "foo"', 'false', WDL.Type.Boolean()),
('"true" != "bar"', 'true', WDL.Type.Boolean()),
('"foo" + "bar"', '"foobar"'),
('"foo" + 1', '"foo1"'),
('2.0 + "bar"', '"2.0bar"'),
('17 + "42"', '"1742"'),
(""" 'foo' + "bar" """, '"foobar"'),
('"{"', '"{"', WDL.Type.String()),
('"$" + "$"', '"$$"', WDL.Type.String()))
self._test_tuples(
(r'''"CNN is working frantically to find their \"source.\""''',
r'''"CNN is working frantically to find their \"source.\""'''),
(r"""'CNN is working frantically to find their "source."'""",
r'''"CNN is working frantically to find their \"source.\""'''),
(r"""'The fact is that many anonymous sources don\'t even exist.'""",
r'''"The fact is that many anonymous sources don't even exist."''')
)
def test_if(self):
self._test_tuples(
("if false then 0 else 1","1", WDL.Type.Int()),
("if true then 0 else 1","0"),
("if false then 0 else 1+2","3"),
("(if false then 0 else 1)+2","3"),
("(if 1>0 then 1+1 else 1)+1","3"),
("if 1>0 then if true then 1 else 2 else 3","1"),
("if 3.14 then 0 else 1", "(Ln 1, Col 1) Expected Boolean instead of Float; in if condition", WDL.Error.StaticTypeMismatch),
("if 0 < 1 then 0 else false", "(Ln 1, Col 1) Expected Int instead of Boolean (unable to unify consequent & alternative types)", WDL.Error.StaticTypeMismatch),
("if true then 1 else 2.0", "1.0", WDL.Type.Float()),
("if false then 1 else 2.0", "2.0", WDL.Type.Float()),
("if true then 1.0 else 2", "1.0", WDL.Type.Float()),
("if false then 1.0 else 2", "2.0", WDL.Type.Float())
)
def test_array(self):
expr = WDL.parse_expr("[true,false]")
expr.infer_type([])
self.assertEqual(str(expr.type), "Array[Boolean]+")
env = []
val = expr.eval(env)
self.assertIsInstance(val, WDL.Value.Array)
self.assertEqual(str(val.type), "Array[Boolean]+")
self.assertEqual(str(val), "[true, false]")
self._test_tuples(
("[true, false][0]", "true"),
("[true, false][1]", "false"),
("[1+2, 3*4][1]", "12"),
("[1,2,3,]", "[1, 2, 3]"),
("[1,'a']", '["1", "a"]'),
("[]","[]", WDL.Type.Array(WDL.Type.Any())),
("[] == []","true"),
("[1, false]", '["1", "false"]', WDL.Type.Array(WDL.Type.String(), nonempty=True)),
("[1, {}]", "(Ln 1, Col 1) Expected Int instead of Boolean; inconsistent types within array", WDL.Error.IndeterminateType),
("1 + 2[3]", "(Ln 1, Col 5) Not an array", WDL.Error.NotAnArray),
("[1, 2, 3][true]", "(Ln 1, Col 11) Expected Int instead of Boolean; Array index", WDL.Error.StaticTypeMismatch),
("[1, 2, 3][4]", "(Ln 1, Col 11) Array index out of bounds", WDL.Error.OutOfBounds)
)
def test_float_coercion(self):
self._test_tuples(
("1 + 1.0", "2.0", WDL.Type.Float()),
("1.0 + 1", "2.0", WDL.Type.Float()),
("1 == 1.0", "true"),
("1 == 1.1", "false"),
("1 != 1.1", "true"),
("1 < 1.0", "false"),
("1 <= 1.0", "true"),
("[1, 2.0]", "[1.0, 2.0]", WDL.Type.Array(WDL.Type.Float())),
("[1, 2.0][0]", "1.0", WDL.Type.Float()),
# TODO: more sophisticated unification algo to handle this
# ("[[1],[2.0]]", "[[1.0], [2.0]]", WDL.Type.Array(WDL.Type.Float())),
)
def test_ident(self):
env = cons_env(("pi", WDL.Value.Float(3.14159)), ("e", WDL.Value.Float(2.71828)),
("t", WDL.Value.Boolean(True)), ("f", WDL.Value.Boolean(False)),
("true_rep_only", WDL.Value.Boolean(False)),
("lefty", WDL.Value.Boolean(False)),
("left_recursive", WDL.Value.Boolean(False)))
self._test_tuples(
("pi", "3.14159", WDL.Type.Float(), env),
("bogus", "(Ln 1, Col 1) Unknown identifier", WDL.Error.UnknownIdentifier, env),
("pi+e", "5.85987", env),
("t||f", "true", WDL.Type.Boolean(), env),
("if t then pi else e", "3.14159", env),
("true_rep_only", "false", env),
("lefty", "false", env),
("left_recursive", "false", env)
)
def test_interpolation(self):
env = cons_env(("pi", WDL.Value.Float(3.14159)), ("e", WDL.Value.Float(2.71828)),
("t", WDL.Value.Boolean(True)), ("f", WDL.Value.Boolean(False)),
("s", WDL.Value.String("foo")))
self._test_tuples(
('"${pi}"', '"3.14159"', env),
('"pi = ${pi}!"', '"pi = 3.14159!"', env),
('"pi+e = ${pi+e}!"', '"pi+e = 5.85987!"', env),
("'This is ${t}'", '"This is true"', env),
("'${f} is ${f}'", '"false is false"', env),
("'${s}bar'", '"foobar"', env),
('"$"','"$"'),
('"$shell"','"$shell"'),
("'c$'",'"c$"'),
("'The U.$. is re$pected again!'",'"The U.$. is re$pected again!"')
)
self._test_tuples(
('"${pi} ~{pi}$"', '"3.14159 ~{pi}$"', env, "draft-2"),
("'${pi} ~{pi}$'", '"3.14159 ~{pi}$"', env, "draft-2"),
('"${pi} ~{pi}$"', '"3.14159 3.14159$"', env, "1.0"),
("'${pi} ~{pi}~'", '"3.14159 3.14159~"', env, "1.0"),
("'$${pi}$'", '"$3.14159$"', env, "draft-2"),
('"$${pi}$$"', '"$3.14159$$"', env, "draft-2"),
("'$${pi}$'", '"$3.14159$"', env, "1.0"),
("'$${pi}$$'", '"$3.14159$$"', env, "1.0"),
("'$$${pi}~'", '"$$3.14159~"', env, "1.0"),
("'~~{pi}~'", '"~3.14159~"', env, "1.0"),
('"~~{pi}~"', '"~3.14159~"', env, "1.0"),
("'~~${pi}~'", '"~~3.14159~"', env, "1.0"),
("'$~{pi}~~'", '"$3.14159~~"', env, "1.0"),
("'$~${pi}~~'", '"$~3.14159~~"', env, "1.0"),
)
def test_pair(self):
env = cons_env(("p", WDL.Value.Pair(WDL.Type.Float(), WDL.Type.Float(),
(WDL.Value.Float(3.14159), WDL.Value.Float(2.71828)))),
("q", WDL.Value.Pair(WDL.Type.Pair(WDL.Type.Int(), WDL.Type.Int()),
WDL.Type.Float(optional=True),
(WDL.Value.Pair(WDL.Type.Int(), WDL.Type.Int(),
(WDL.Value.Int(4), WDL.Value.Int(2))),
WDL.Value.Null()))))
self._test_tuples(
("(1,2)", "(1,2)", WDL.Type.Pair(WDL.Type.Int(), WDL.Type.Int())),
("(1,2).left", "1"),
("(1,false).right", "false"),
("(false,[1,2]).right[1]", "2"),
("[1,2].left", "", WDL.Error.NoSuchMember),
("false.right", "", WDL.Error.NoSuchMember),
("p.left", "3.14159", env),
("p.right", "2.71828", env),
("q.left.left", "4", env),
("q.left.right", "2", env)
)
def test_map(self):
self._test_tuples(
("{'foo': 1, 'bar': 2}['bar']", "2"),
("{'foo': 1, 'bar': 2, 'baz': 3.0}['bar']", "2.0", WDL.Type.Float()),
("{0: 1, 2: 3}[false]", "", WDL.Error.StaticTypeMismatch),
("{0: 1, 2: 3}['foo']", "", WDL.Error.EvalError),
("{'foo': 1, 'bar': 2}[3]", "", WDL.Error.OutOfBounds), # int coerces to string...
("{3: 1, false: 2}", "", WDL.Error.IndeterminateType),
("{'foo': true, 'bar': 0,}", '{"foo": true, "bar": 0}', WDL.Type.Map((WDL.Type.String(), WDL.Type.String())))
)
def test_errors(self):
self._test_tuples(
("1 + bogus(2)", "(Ln 1, Col 5) No such function: bogus", WDL.Error.NoSuchFunction)
)
def test_short_circuit(self):
self._test_tuples(
("true && 1/0 == 1", "", WDL.Error.EvalError),
("false && 1/0 == 1", "false"),
("false || 1/0 == 1", "", WDL.Error.EvalError),
("true || 1/0 == 1", "true"),
)
def cons_env(*bindings):
b = WDL.Env.Bindings()
for (x,y) in bindings:
b = WDL.Env.Bindings(WDL.Env.Binding(x,y), b)
return b
class TestEnv(unittest.TestCase):
"""
Test the trickier recursive Env operations
"""
def test_bind(self):
e = WDL.Env.Bindings(WDL.Env.Binding("foo", "bar"))
self.assertEqual(e.resolve("foo"), "bar")
e = e.bind("fruit.orange", "a")
self.assertEqual(len(list(e)), 2)
self.assertEqual(e.resolve("foo"), "bar")
self.assertEqual(e.resolve("fruit.orange"), "a")
e = e.bind("fruit.pear", "b")
self.assertEqual(len(list(e)), 3)
self.assertEqual(e.resolve("foo"), "bar")
self.assertEqual(e.resolve("fruit.orange"), "a")
self.assertEqual(e.resolve("fruit.pear"), "b")
e = e.bind("fruit.apple.honeycrisp", "c").bind("fruit.apple.macintosh", "d")
self.assertEqual(len(list(e)), 5)
self.assertEqual(e.resolve("foo"), "bar")
self.assertEqual(e.resolve("fruit.orange"), "a")
self.assertEqual(e.resolve("fruit.pear"), "b")
self.assertEqual(len(list(e.enter_namespace("fruit.apple"))), 2)
self.assertEqual(e.resolve("fruit.apple.honeycrisp"), "c")
self.assertEqual(e.resolve("fruit.apple.macintosh"), "d")
def test_subtract(self):
e = WDL.Env.Bindings()
e = e.bind("foo", "bar").bind("fruit.orange", "a").bind("fruit.pear", "b")
e = e.bind("fruit.apple.honeycrisp", "c").bind("fruit.apple.macintosh", "d")
e = e.bind("fruit.grape.red", "e").bind("fruit.grape.green", "f")
rhs = WDL.Env.Bindings().bind("fruit.pear","b").bind("fruit.apple.honeycrisp","c")
e = e.subtract(rhs)
with self.assertRaises(KeyError):
e.resolve("fruit.pear")
with self.assertRaises(KeyError):
e.resolve("fruit.apple.honeycrisp")
self.assertEqual(e.resolve("foo"), "bar")
self.assertEqual(e.resolve("fruit.orange"), "a")
self.assertEqual(e.resolve("fruit.apple.macintosh"), "d")
self.assertEqual(e.resolve("fruit.grape.green"), "f")
e = e.subtract(WDL.Env.Bindings(WDL.Env.Binding("fruit.apple.macintosh", None)))
with self.assertRaises(KeyError):
e.resolve("fruit.apple.macintosh")
self.assertFalse(e.has_namespace("fruit.apple"))
self.assertEqual(e.resolve("foo"), "bar")
self.assertEqual(e.resolve("fruit.orange"), "a")
self.assertEqual(e.resolve("fruit.grape.green"), "f")
def test_namespaces(self):
e = WDL.Env.Bindings().bind("fruit.apple.honeycrisp", 42)
self.assertTrue(e.has_namespace("fruit.apple"))
self.assertTrue(e.has_namespace("fruit."))
self.assertFalse(e.has_namespace("fruit.apple.honeycrisp"))
e = WDL.Env.Bindings().with_empty_namespace("fruit.apple")
self.assertTrue(e.has_namespace("fruit.apple"))
self.assertTrue(e.has_namespace("fruit."))
self.assertFalse(e.has_namespace("fruit.apple.honeycrisp"))
e = WDL.Env.merge(WDL.Env.Bindings().with_empty_namespace("fruit.apple"),
WDL.Env.Bindings().with_empty_namespace("fruit.orange"))
self.assertTrue(e.has_namespace("fruit.apple"))
self.assertTrue(e.has_namespace("fruit.orange"))
self.assertTrue(e.has_namespace("fruit."))
e = WDL.Env.Bindings().with_empty_namespace("apple.").with_empty_namespace("orange").wrap_namespace("fruit")
self.assertTrue(e.has_namespace("fruit.apple"))
self.assertTrue(e.has_namespace("fruit.orange"))
self.assertTrue(e.has_namespace("fruit."))
class TestValue(unittest.TestCase):
def test_json(self):
pty = WDL.Type.StructInstance("Person")
pty.members = {
"name": WDL.Type.String(), "age": WDL.Type.Int(),
"pets": WDL.Type.Map((WDL.Type.String(), WDL.Type.Int()), optional=True)
}
cases = [
(WDL.Type.Boolean(), True),
(WDL.Type.Boolean(), False),
(WDL.Type.Int(), 42),
(WDL.Type.Float(), 3.14),
(WDL.Type.String(), 'CNN is working frantically to find their "source."'),
(WDL.Type.String(optional=True), None),
(WDL.Type.File(), '/tmp/stdout.txt'),
(WDL.Type.Array(WDL.Type.String()), ["apple", "orange"]),
(WDL.Type.Array(WDL.Type.String(optional=True)), ["apple", "orange", None]),
(WDL.Type.Map((WDL.Type.String(), WDL.Type.Int())), {"cats": 42, "dogs": 99}),
(pty, {"name": "Alyssa", "age": 42, "pets": None}),
(pty, {"name": "Alyssa", "age": 42, "pets": {"cats": 42, "dogs": 99}}),
(WDL.Type.Array(WDL.Type.Pair(WDL.Type.String(), WDL.Type.Int())), [["a",0],["b",1]]),
(WDL.Type.Boolean(), 42, WDL.Error.InputError),
(WDL.Type.Float(), "your president", WDL.Error.InputError),
(WDL.Type.String(), None, WDL.Error.InputError),
(pty, {"name": "Alyssa", "age": None, "pets": None}, WDL.Error.InputError),
(pty, {"name": "Alyssa", "age": 42}, WDL.Error.InputError),
(pty, {"name": "Alyssa", "age": 42, "pets": None, "address": "No 4, Privet Drive"}, WDL.Error.InputError),
]
for t in cases:
if len(t) >= 3 and inspect.isclass(t[2]):
with self.assertRaises(t[2]):
WDL.Value.from_json(t[0],t[1])
else:
self.assertEqual(t[1], WDL.Value.from_json(t[0],t[1]).json)
self.assertEqual(
WDL.parse_expr('object {"name": "Alyssa", "age": 42, "address": "No 4, Privet Drive"}',
version="1.0").infer_type([]).eval([]).json,
{"name": "Alyssa", "age": 42, "address": "No 4, Privet Drive"}
)
def test_env_json(self):
doc = WDL.parse_document(R"""
version 1.0
workflow w {
call t as s
}
task t {
input {
String who
Int age = 42
}
command {}
output {
String message = read_string("dummy")
}
}
""")
doc.typecheck()
def rt(exe, d):
namespace = ""
if isinstance(exe, WDL.Workflow):
namespace = exe.name
self.assertEqual(WDL.values_to_json(WDL.values_from_json(d, exe.available_inputs, exe.required_inputs, namespace=namespace), namespace=namespace), d)
rt(doc.tasks[0], {"who": "Alyssa"})
rt(doc.tasks[0], {"who": "Alyssa", "age": 24})
with self.assertRaises(WDL.Error.InputError):
rt(doc.tasks[0], {"who": "Alyssa", "bogus": "Ben"})
with self.assertRaises(WDL.Error.InputError):
rt(doc.tasks[0], {})
rt(doc.workflow, {"w.s.who": "Alyssa"})
rt(doc.workflow, {"w.s.who": "Alyssa", "w.s.age": 24})
with self.assertRaises(WDL.Error.InputError):
rt(doc.workflow, {})
with self.assertRaises(WDL.Error.InputError):
rt(doc.workflow, {".who": "a"})
with self.assertRaises(WDL.Error.InputError):
rt(doc.workflow, {"w.s..who": "b"})
# misc functionality
self.assertEqual(WDL.values_to_json(doc.workflow.required_inputs, "w"), {"w.s.who": "String"})
self.assertEqual(WDL.values_to_json(doc.workflow._type_env), {"s.message": "String"})
|
py | b40a1017c702dcfa1627ca4eda61f435649fa65a | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_allclose
from numpy.testing import assert_array_less
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.metrics import roc_auc_score
from sklearn.base import clone
from scipy.stats import rankdata
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.cof import COF
from pyod.utils.data import generate_data
class TestFastCOF(unittest.TestCase):
def setUp(self):
self.n_train = 100
self.n_test = 50
self.contamination = 0.1
self.roc_floor = 0.8
self.X_train, self.y_train, self.X_test, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
contamination=self.contamination, random_state=42)
self.clf = COF(contamination=self.contamination)
self.clf.fit(self.X_train)
def test_parameters(self):
if not (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None):
raise AssertionError
if not (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None):
raise AssertionError
if not (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None):
raise AssertionError
if not (hasattr(self.clf, 'n_neighbors_') and
self.clf.n_neighbors_ is not None):
raise AssertionError
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
if (roc_auc_score(self.y_test, pred_scores) < self.roc_floor):
raise AssertionError
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')
def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.X_test,
return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
if (confidence.min() < 0):
raise AssertionError
if (confidence.max() > 1):
raise AssertionError
def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(self.X_test,
method='linear',
return_confidence=True)
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
assert_equal(confidence.shape, self.y_test.shape)
if (confidence.min() < 0):
raise AssertionError
if (confidence.max() > 1):
raise AssertionError
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')
def test_predict_rank(self):
pred_scores = self.clf.decision_function(self.X_test)
pred_ranks = self.clf._predict_rank(self.X_test)
print(pred_ranks)
        # assert the order is preserved
assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)
assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
assert_array_less(-0.1, pred_ranks)
def test_predict_rank_normalized(self):
        pred_scores = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
        # assert the order is preserved
        assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)
assert_array_less(pred_ranks, 1.01)
assert_array_less(-0.1, pred_ranks)
def test_check_parameters(self):
with assert_raises(ValueError):
COF(contamination=0.1, n_neighbors=-1)
with assert_raises(ValueError):
COF(contamination=10., n_neighbors=5)
with assert_raises(TypeError):
COF(contamination=0.1, n_neighbors='not int')
with assert_raises(TypeError):
COF(contamination='not float', n_neighbors=5)
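        # An oversized n_neighbors should be clipped by the estimator during
        # fit, so the fitted n_neighbors_ must end up smaller than the number
        # of training samples (checked below).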
cof_ = COF(contamination=0.1, n_neighbors=10000)
cof_.fit(self.X_train)
if self.X_train.shape[0] <= cof_.n_neighbors_:
raise AssertionError
# todo: fix clone issue
def test_model_clone(self):
pass
# clone_clf = clone(self.clf)
def tearDown(self):
pass
class TestMemoryCOF(unittest.TestCase):
def setUp(self):
self.n_train = 100
self.n_test = 50
self.contamination = 0.1
self.roc_floor = 0.8
self.X_train, self.y_train, self.X_test, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
contamination=self.contamination, random_state=42)
self.clf = COF(contamination=self.contamination, method="memory")
self.clf.fit(self.X_train)
def test_parameters(self):
if not (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None):
raise AssertionError
if not (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None):
raise AssertionError
if not (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None):
raise AssertionError
if not (hasattr(self.clf, 'n_neighbors_') and
self.clf.n_neighbors_ is not None):
raise AssertionError
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
if (roc_auc_score(self.y_test, pred_scores) < self.roc_floor):
raise AssertionError
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
if (pred_proba.min() < 0):
raise AssertionError
if (pred_proba.max() > 1):
raise AssertionError
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')
def test_predict_rank(self):
pred_scores = self.clf.decision_function(self.X_test)
pred_ranks = self.clf._predict_rank(self.X_test)
print(pred_ranks)
        # assert the order is preserved
assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)
assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
assert_array_less(-0.1, pred_ranks)
def test_predict_rank_normalized(self):
        pred_scores = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
        # assert the order is preserved
        assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)
assert_array_less(pred_ranks, 1.01)
assert_array_less(-0.1, pred_ranks)
def test_check_parameters(self):
with assert_raises(ValueError):
COF(contamination=0.1, n_neighbors=-1)
with assert_raises(ValueError):
COF(contamination=10., n_neighbors=5)
with assert_raises(TypeError):
COF(contamination=0.1, n_neighbors='not int')
with assert_raises(TypeError):
COF(contamination='not float', n_neighbors=5)
cof_ = COF(contamination=0.1, n_neighbors=10000)
cof_.fit(self.X_train)
if self.X_train.shape[0] <= cof_.n_neighbors_:
raise AssertionError
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
py | b40a10ddd9f6c23c565027f95171d1834c5ef07c | # coding: utf-8
# DJANGO IMPORTS
from django.contrib import admin
# PROJECT IMPORTS
from grappelli.tests.models import Category, Entry
site = admin.AdminSite(name="Admin Site")
class CategoryOptions(admin.ModelAdmin):
list_display = ("id", "name",)
list_display_links = ("name",)
class EntryOptions(admin.ModelAdmin):
list_display = ("id", "title", "category", "category_alt", "user",)
list_display_links = ("title",)
def get_queryset(self, request):
qs = super(EntryOptions, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
site.register(Category, CategoryOptions)
site.register(Entry, EntryOptions)
|
py | b40a11072f14de70b2d403d981e5f4389f3a8f3b | import os
class DBConfigurations:
postgres_username = "user"
postgres_password = "password"
postgres_port = 5432
postgres_db = "model_db"
postgres_server = "localhost"
sql_alchemy_database_url = (
f"postgresql://{postgres_username}:{postgres_password}@{postgres_server}:{postgres_port}/{postgres_db}"
)
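# Illustrative sketch (not part of the original module): how the assembled
# sql_alchemy_database_url would typically be consumed with SQLAlchemy. The
# helper below is an assumption for demonstration only and is never called here.
def _example_session_factory():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine(DBConfigurations.sql_alchemy_database_url)
    return sessionmaker(bind=engine)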
class APIConfigurations:
title = os.getenv("API_TITLE", "Model_DB_Service")
description = os.getenv(
"API_DESCRIPTION", "machine learning system training patterns")
version = os.getenv("API_VERSION", "0.1")
class MachineLearningConfigurations:
model_dir = r'/Users/User/pytorch-CycleGAN-and-pix2pix/pytorch-CycleGAN-and-pix2pix'
test_dir = os.path.join(model_dir, r'datasets/facades2/test/')
train_dir = os.path.join(model_dir, r'datasets/facades2/train/')
val_dir = os.path.join(model_dir, r'datasets/facades2/val/')
result_dir = os.path.join(
model_dir, r"results/facades_pix2pix2/")
python_dir = 'C:\\JupyterLab\\resources\\jlab_server\\envs\\PyTorch\\python.exe '
paths = {
"test": test_dir,
"train": train_dir,
"result": result_dir,
"val": val_dir
}
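# Illustrative sketch (an assumption, not part of the original module): report
# any configured dataset/result directories that do not exist before a
# training or inference job is launched.
def _missing_paths():
    return {
        name: path
        for name, path in MachineLearningConfigurations.paths.items()
        if not os.path.isdir(path)
    }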
|
py | b40a146de6ca9214a63c47ee184d4f7548648763 | from flask import redirect, url_for
from flask_login import current_user, login_required
from app import db
from app.models import User
from .. import bp
@bp.route('/me')
@login_required
def me():
return redirect(url_for('.user', id=current_user.id))
@bp.route('/me/deactivate')
@login_required
def deactivate():
current_user.delete()
db.session.commit()
return redirect(url_for('.home'))
@bp.route('/user/<id>')
def user(id):
user = User.query.get(id) or User.query.filter_by(login=id).first()
if not user:
return 'User not found', 404
return str(user)
|
py | b40a148fb95af01cd25eeec32d1036aa282b73cb | from pypy.objspace.flow.model import FunctionGraph, Constant, Variable, c_last_exception
from pypy.rlib.rarithmetic import intmask, r_uint, ovfcheck, r_longlong
from pypy.rlib.rarithmetic import r_ulonglong, is_valid_int
from pypy.rpython.lltypesystem import lltype, llmemory, lloperation, llheap
from pypy.rpython.lltypesystem import rclass
from pypy.rpython.ootypesystem import ootype
from pypy.rlib.objectmodel import ComputedIntSymbolic, CDefinedIntSymbolic
from pypy.rlib.objectmodel import Symbolic
from pypy.rlib import rstackovf
import sys, os
import math
import py
import traceback, cStringIO
log = py.log.Producer('llinterp')
class LLException(Exception):
def __init__(self, *args):
"NOT_RPYTHON"
Exception.__init__(self, *args)
def __str__(self):
etype = self.args[0]
#evalue = self.args[1]
if len(self.args) > 2:
f = cStringIO.StringIO()
original_type, original_value, original_tb = self.args[2]
traceback.print_exception(original_type, original_value, original_tb,
file=f)
extra = '\n' + f.getvalue().rstrip('\n')
extra = extra.replace('\n', '\n | ') + '\n `------'
else:
extra = ''
return '<LLException %r%s>' % (type_name(etype), extra)
class LLFatalError(Exception):
def __str__(self):
return ': '.join([str(x) for x in self.args])
def type_name(etype):
if isinstance(lltype.typeOf(etype), lltype.Ptr):
return ''.join(etype.name).rstrip('\x00')
else:
# ootype!
return etype._INSTANCE._name.split(".")[-1]
class LLInterpreter(object):
""" low level interpreter working with concrete values. """
current_interpreter = None
def __init__(self, typer, tracing=True, exc_data_ptr=None):
self.bindings = {}
self.typer = typer
# 'heap' is module or object that provides malloc, etc for lltype ops
self.heap = llheap
self.exc_data_ptr = exc_data_ptr
self.frame_stack = []
self.tracer = None
self.frame_class = LLFrame
if tracing:
self.tracer = Tracer()
def eval_graph(self, graph, args=(), recursive=False):
llframe = self.frame_class(graph, args, self)
if self.tracer and not recursive:
global tracer1
tracer1 = self.tracer
self.tracer.start()
retval = None
self.traceback_frames = []
old_frame_stack = self.frame_stack[:]
prev_interpreter = LLInterpreter.current_interpreter
LLInterpreter.current_interpreter = self
try:
try:
retval = llframe.eval()
except LLException, e:
log.error("LLEXCEPTION: %s" % (e, ))
self.print_traceback()
if self.tracer:
self.tracer.dump('LLException: %s\n' % (e,))
raise
except Exception, e:
if getattr(e, '_go_through_llinterp_uncaught_', False):
raise
log.error("AN ERROR OCCURED: %s" % (e, ))
self.print_traceback()
if self.tracer:
line = str(e)
if line:
line = ': ' + line
line = '* %s' % (e.__class__.__name__,) + line
self.tracer.dump(line + '\n')
raise
finally:
LLInterpreter.current_interpreter = prev_interpreter
assert old_frame_stack == self.frame_stack
if self.tracer:
if retval is not None:
self.tracer.dump(' ---> %r\n' % (retval,))
if not recursive:
self.tracer.stop()
return retval
def print_traceback(self):
frames = self.traceback_frames
frames.reverse()
self.traceback_frames = []
lines = []
for frame in frames:
logline = frame.graph.name + "()"
if frame.curr_block is None:
logline += " <not running yet>"
lines.append(logline)
continue
try:
logline += " " + self.typer.annotator.annotated[frame.curr_block].func.__module__
except (KeyError, AttributeError, TypeError):
logline += " <unknown module>"
lines.append(logline)
for i, operation in enumerate(frame.curr_block.operations):
if i == frame.curr_operation_index:
logline = "E %s"
else:
logline = " %s"
lines.append(logline % (operation, ))
if self.tracer:
self.tracer.dump('Traceback\n', bold=True)
for line in lines:
self.tracer.dump(line + '\n')
for line in lines:
log.traceback(line)
def find_roots(self):
"""Return a list of the addresses of the roots."""
#log.findroots("starting")
roots = []
for frame in self.frame_stack:
#log.findroots("graph", frame.graph.name)
frame.find_roots(roots)
return roots
def find_exception(self, exc):
assert isinstance(exc, LLException)
klass, inst = exc.args[0], exc.args[1]
exdata = self.typer.getexceptiondata()
frame = self.frame_class(None, [], self)
for cls in enumerate_exceptions_top_down():
evalue = frame.op_direct_call(exdata.fn_pyexcclass2exc,
lltype.pyobjectptr(cls))
etype = frame.op_direct_call(exdata.fn_type_of_exc_inst, evalue)
if etype == klass:
return cls
raise ValueError("couldn't match exception, maybe it"
" has RPython attributes like OSError?")
def get_transformed_exc_data(self, graph):
if hasattr(graph, 'exceptiontransformed'):
return graph.exceptiontransformed
if getattr(graph, 'rgenop', False):
return self.exc_data_ptr
return None
def _store_exception(self, exc):
raise PleaseOverwriteStoreException("You just invoked ll2ctypes callback without overwriting _store_exception on llinterpreter")
class PleaseOverwriteStoreException(Exception):
pass
def checkptr(ptr):
assert isinstance(lltype.typeOf(ptr), lltype.Ptr)
def checkadr(addr):
assert lltype.typeOf(addr) is llmemory.Address
def is_inst(inst):
return isinstance(lltype.typeOf(inst), (ootype.Instance, ootype.BuiltinType, ootype.StaticMethod))
def checkinst(inst):
assert is_inst(inst)
class LLFrame(object):
def __init__(self, graph, args, llinterpreter):
assert not graph or isinstance(graph, FunctionGraph)
self.graph = graph
self.args = args
self.llinterpreter = llinterpreter
self.heap = llinterpreter.heap
self.bindings = {}
self.curr_block = None
self.curr_operation_index = 0
self.alloca_objects = []
def newsubframe(self, graph, args):
return self.__class__(graph, args, self.llinterpreter)
# _______________________________________________________
# variable setters/getters helpers
def clear(self):
self.bindings.clear()
def fillvars(self, block, values):
vars = block.inputargs
assert len(vars) == len(values), (
"block %s received %d args, expected %d" % (
block, len(values), len(vars)))
for var, val in zip(vars, values):
self.setvar(var, val)
def setvar(self, var, val):
if var.concretetype is not lltype.Void:
try:
val = lltype.enforce(var.concretetype, val)
except TypeError:
assert False, "type error: input value of type:\n\n\t%r\n\n===> variable of type:\n\n\t%r\n" % (lltype.typeOf(val), var.concretetype)
assert isinstance(var, Variable)
self.bindings[var] = val
def setifvar(self, var, val):
if isinstance(var, Variable):
self.setvar(var, val)
def getval(self, varorconst):
try:
val = varorconst.value
except AttributeError:
val = self.bindings[varorconst]
if isinstance(val, ComputedIntSymbolic):
val = val.compute_fn()
if varorconst.concretetype is not lltype.Void:
try:
val = lltype.enforce(varorconst.concretetype, val)
except TypeError:
assert False, "type error: %r val from %r var/const" % (lltype.typeOf(val), varorconst.concretetype)
return val
def getval_or_subop(self, varorsubop):
from pypy.translator.oosupport.treebuilder import SubOperation
if isinstance(varorsubop, SubOperation):
self.eval_operation(varorsubop.op)
resultval = self.getval(varorsubop.op.result)
del self.bindings[varorsubop.op.result] # XXX hack
return resultval
else:
return self.getval(varorsubop)
# _______________________________________________________
# other helpers
def getoperationhandler(self, opname):
ophandler = getattr(self, 'op_' + opname, None)
if ophandler is None:
# try to import the operation from opimpl.py
ophandler = lloperation.LL_OPERATIONS[opname].fold
setattr(self.__class__, 'op_' + opname, staticmethod(ophandler))
return ophandler
# _______________________________________________________
# evaling functions
def eval(self):
graph = self.graph
tracer = self.llinterpreter.tracer
if tracer:
tracer.enter(graph)
self.llinterpreter.frame_stack.append(self)
try:
try:
nextblock = graph.startblock
args = self.args
while 1:
self.clear()
self.fillvars(nextblock, args)
nextblock, args = self.eval_block(nextblock)
if nextblock is None:
for obj in self.alloca_objects:
obj._obj._free()
return args
except Exception:
self.llinterpreter.traceback_frames.append(self)
raise
finally:
leavingframe = self.llinterpreter.frame_stack.pop()
assert leavingframe is self
if tracer:
tracer.leave()
def eval_block(self, block):
""" return (nextblock, values) tuple. If nextblock
is None, values is the concrete return value.
"""
self.curr_block = block
catch_exception = block.exitswitch == c_last_exception
e = None
try:
for i, op in enumerate(block.operations):
self.curr_operation_index = i
self.eval_operation(op)
except LLException, e:
if not (catch_exception and op is block.operations[-1]):
raise
except RuntimeError, e:
rstackovf.check_stack_overflow()
# xxx fish fish fish for proper etype and evalue to use
rtyper = self.llinterpreter.typer
bk = rtyper.annotator.bookkeeper
classdef = bk.getuniqueclassdef(rstackovf._StackOverflow)
exdata = rtyper.getexceptiondata()
evalue = exdata.get_standard_ll_exc_instance(rtyper, classdef)
etype = exdata.fn_type_of_exc_inst(evalue)
e = LLException(etype, evalue)
if not (catch_exception and op is block.operations[-1]):
raise e
# determine nextblock and/or return value
if len(block.exits) == 0:
# return block
tracer = self.llinterpreter.tracer
if len(block.inputargs) == 2:
# exception
if tracer:
tracer.dump('raise')
etypevar, evaluevar = block.getvariables()
etype = self.getval(etypevar)
evalue = self.getval(evaluevar)
# watch out, these are _ptr's
raise LLException(etype, evalue)
resultvar, = block.getvariables()
result = self.getval(resultvar)
exc_data = self.llinterpreter.get_transformed_exc_data(self.graph)
if exc_data:
# re-raise the exception set by this graph, if any
etype = exc_data.exc_type
if etype:
evalue = exc_data.exc_value
if tracer:
tracer.dump('raise')
exc_data.exc_type = lltype.typeOf(etype )._defl()
exc_data.exc_value = lltype.typeOf(evalue)._defl()
from pypy.translator import exceptiontransform
T = resultvar.concretetype
errvalue = exceptiontransform.error_value(T)
# check that the exc-transformed graph returns the error
# value when it returns with an exception set
assert result == errvalue
raise LLException(etype, evalue)
if tracer:
tracer.dump('return')
return None, result
elif block.exitswitch is None:
# single-exit block
assert len(block.exits) == 1
link = block.exits[0]
elif catch_exception:
link = block.exits[0]
if e:
exdata = self.llinterpreter.typer.getexceptiondata()
cls = e.args[0]
inst = e.args[1]
for link in block.exits[1:]:
assert issubclass(link.exitcase, py.builtin.BaseException)
if self.op_direct_call(exdata.fn_exception_match,
cls, link.llexitcase):
self.setifvar(link.last_exception, cls)
self.setifvar(link.last_exc_value, inst)
break
else:
# no handler found, pass on
raise e
else:
llexitvalue = self.getval(block.exitswitch)
if block.exits[-1].exitcase == "default":
defaultexit = block.exits[-1]
nondefaultexits = block.exits[:-1]
assert defaultexit.llexitcase is None
else:
defaultexit = None
nondefaultexits = block.exits
for link in nondefaultexits:
if link.llexitcase == llexitvalue:
break # found -- the result is in 'link'
else:
if defaultexit is None:
raise ValueError("exit case %r not found in the exit links "
"of %r" % (llexitvalue, block))
else:
link = defaultexit
return link.target, [self.getval(x) for x in link.args]
def eval_operation(self, operation):
tracer = self.llinterpreter.tracer
if tracer:
tracer.dump(str(operation))
ophandler = self.getoperationhandler(operation.opname)
        # XXX slightly unnice but an important safety check
if operation.opname == 'direct_call':
assert isinstance(operation.args[0], Constant)
elif operation.opname == 'indirect_call':
assert isinstance(operation.args[0], Variable)
if getattr(ophandler, 'specialform', False):
retval = ophandler(*operation.args)
else:
vals = [self.getval_or_subop(x) for x in operation.args]
if getattr(ophandler, 'need_result_type', False):
vals.insert(0, operation.result.concretetype)
try:
retval = ophandler(*vals)
except LLException, e:
                # safety check that the operation is allowed to raise that
# exception
if operation.opname in lloperation.LL_OPERATIONS:
canraise = lloperation.LL_OPERATIONS[operation.opname].canraise
if Exception not in canraise:
exc = self.llinterpreter.find_exception(e)
for canraiseexc in canraise:
if issubclass(exc, canraiseexc):
break
else:
raise TypeError("the operation %s is not expected to raise %s" % (operation, exc))
# for exception-transformed graphs, store the LLException
# into the exc_data used by this graph
exc_data = self.llinterpreter.get_transformed_exc_data(
self.graph)
if exc_data:
etype = e.args[0]
evalue = e.args[1]
exc_data.exc_type = etype
exc_data.exc_value = evalue
from pypy.translator import exceptiontransform
retval = exceptiontransform.error_value(
operation.result.concretetype)
else:
raise
self.setvar(operation.result, retval)
if tracer:
if retval is None:
tracer.dump('\n')
else:
tracer.dump(' ---> %r\n' % (retval,))
def make_llexception(self, exc=None):
if exc is None:
original = sys.exc_info()
exc = original[1]
# it makes no sense to convert some exception classes that
# just mean something buggy crashed
if isinstance(exc, (AssertionError, AttributeError,
TypeError, NameError,
KeyboardInterrupt, SystemExit,
ImportError, SyntaxError)):
raise original[0], original[1], original[2] # re-raise it
# for testing the JIT (see ContinueRunningNormally) we need
# to let some exceptions introduced by the JIT go through
# the llinterpreter uncaught
if getattr(exc, '_go_through_llinterp_uncaught_', False):
raise original[0], original[1], original[2] # re-raise it
extraargs = (original,)
else:
extraargs = ()
typer = self.llinterpreter.typer
exdata = typer.getexceptiondata()
if isinstance(exc, OSError):
self.op_direct_call(exdata.fn_raise_OSError, exc.errno)
assert False, "op_direct_call above should have raised"
else:
exc_class = exc.__class__
evalue = self.op_direct_call(exdata.fn_pyexcclass2exc,
self.heap.pyobjectptr(exc_class))
etype = self.op_direct_call(exdata.fn_type_of_exc_inst, evalue)
raise LLException(etype, evalue, *extraargs)
def invoke_callable_with_pyexceptions(self, fptr, *args):
obj = self.llinterpreter.typer.type_system.deref(fptr)
try:
return obj._callable(*args)
except LLException, e:
raise
except Exception, e:
if getattr(e, '_go_through_llinterp_uncaught_', False):
raise
if getattr(obj, '_debugexc', False):
log.ERROR('The llinterpreter got an '
'unexpected exception when calling')
log.ERROR('the external function %r:' % (fptr,))
log.ERROR('%s: %s' % (e.__class__.__name__, e))
if self.llinterpreter.tracer:
self.llinterpreter.tracer.flush()
import sys
from pypy.translator.tool.pdbplus import PdbPlusShow
PdbPlusShow(None).post_mortem(sys.exc_info()[2])
self.make_llexception()
def find_roots(self, roots):
#log.findroots(self.curr_block.inputargs)
vars = []
for v in self.curr_block.inputargs:
if isinstance(v, Variable):
vars.append(v)
for op in self.curr_block.operations[:self.curr_operation_index]:
vars.append(op.result)
for v in vars:
TYPE = getattr(v, 'concretetype', None)
if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc':
roots.append(_address_of_local_var(self, v))
# __________________________________________________________
# misc LL operation implementations
def op_debug_view(self, *ll_objects):
from pypy.translator.tool.lltracker import track
track(*ll_objects)
def op_debug_assert(self, x, msg):
assert x, msg
def op_debug_fatalerror(self, ll_msg, ll_exc=None):
msg = ''.join(ll_msg.chars)
if ll_exc is None:
raise LLFatalError(msg)
else:
ll_exc_type = lltype.cast_pointer(rclass.OBJECTPTR, ll_exc).typeptr
raise LLFatalError(msg, LLException(ll_exc_type, ll_exc))
def op_debug_llinterpcall(self, pythonfunction, *args_ll):
try:
return pythonfunction(*args_ll)
except:
self.make_llexception()
def op_debug_start_traceback(self, *args):
pass # xxx write debugging code here?
def op_debug_reraise_traceback(self, *args):
pass # xxx write debugging code here?
def op_debug_record_traceback(self, *args):
pass # xxx write debugging code here?
def op_debug_print_traceback(self, *args):
pass # xxx write debugging code here?
def op_debug_catch_exception(self, *args):
pass # xxx write debugging code here?
def op_jit_marker(self, *args):
pass
def op_jit_record_known_class(self, *args):
pass
def op_get_exception_addr(self, *args):
raise NotImplementedError
def op_get_exc_value_addr(self, *args):
raise NotImplementedError
def op_instrument_count(self, ll_tag, ll_label):
pass # xxx for now
def op_keepalive(self, value):
pass
def op_hint(self, x, hints):
return x
def op_decode_arg(self, fname, i, name, vargs, vkwds):
raise NotImplementedError("decode_arg")
def op_decode_arg_def(self, fname, i, name, vargs, vkwds, default):
raise NotImplementedError("decode_arg_def")
def op_check_no_more_arg(self, fname, n, vargs):
raise NotImplementedError("check_no_more_arg")
def op_getslice(self, vargs, start, stop_should_be_None):
raise NotImplementedError("getslice") # only for argument parsing
def op_check_self_nonzero(self, fname, vself):
raise NotImplementedError("check_self_nonzero")
def op_setfield(self, obj, fieldname, fieldvalue):
# obj should be pointer
FIELDTYPE = getattr(lltype.typeOf(obj).TO, fieldname)
if FIELDTYPE is not lltype.Void:
self.heap.setfield(obj, fieldname, fieldvalue)
def op_bare_setfield(self, obj, fieldname, fieldvalue):
# obj should be pointer
FIELDTYPE = getattr(lltype.typeOf(obj).TO, fieldname)
if FIELDTYPE is not lltype.Void:
setattr(obj, fieldname, fieldvalue)
def op_getinteriorfield(self, obj, *offsets):
checkptr(obj)
ob = obj
for o in offsets:
if isinstance(o, str):
ob = getattr(ob, o)
else:
ob = ob[o]
assert not isinstance(ob, lltype._interior_ptr)
return ob
def getinneraddr(self, obj, *offsets):
TYPE = lltype.typeOf(obj).TO
addr = llmemory.cast_ptr_to_adr(obj)
for o in offsets:
if isinstance(o, str):
addr += llmemory.offsetof(TYPE, o)
TYPE = getattr(TYPE, o)
else:
addr += llmemory.itemoffsetof(TYPE, o)
TYPE = TYPE.OF
return addr, TYPE
def op_setinteriorfield(self, obj, *fieldnamesval):
offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1]
inneraddr, FIELD = self.getinneraddr(obj, *offsets)
if FIELD is not lltype.Void:
self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue, offsets)
def op_bare_setinteriorfield(self, obj, *fieldnamesval):
offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1]
inneraddr, FIELD = self.getinneraddr(obj, *offsets)
if FIELD is not lltype.Void:
llheap.setinterior(obj, inneraddr, FIELD, fieldvalue)
def op_getarrayitem(self, array, index):
return array[index]
def op_setarrayitem(self, array, index, item):
# array should be a pointer
ITEMTYPE = lltype.typeOf(array).TO.OF
if ITEMTYPE is not lltype.Void:
self.heap.setarrayitem(array, index, item)
def op_bare_setarrayitem(self, array, index, item):
# array should be a pointer
ITEMTYPE = lltype.typeOf(array).TO.OF
if ITEMTYPE is not lltype.Void:
array[index] = item
def perform_call(self, f, ARGS, args):
fobj = self.llinterpreter.typer.type_system.deref(f)
has_callable = getattr(fobj, '_callable', None) is not None
if hasattr(fobj, 'graph'):
graph = fobj.graph
else:
assert has_callable, "don't know how to execute %r" % f
return self.invoke_callable_with_pyexceptions(f, *args)
args_v = graph.getargs()
if len(ARGS) != len(args_v):
raise TypeError("graph with %d args called with wrong func ptr type: %r" %(len(args_v), ARGS))
for T, v in zip(ARGS, args_v):
if not lltype.isCompatibleType(T, v.concretetype):
raise TypeError("graph with %r args called with wrong func ptr type: %r" %
(tuple([v.concretetype for v in args_v]), ARGS))
frame = self.newsubframe(graph, args)
return frame.eval()
def op_direct_call(self, f, *args):
FTYPE = self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f))
return self.perform_call(f, FTYPE.ARGS, args)
def op_indirect_call(self, f, *args):
graphs = args[-1]
args = args[:-1]
if graphs is not None:
obj = self.llinterpreter.typer.type_system.deref(f)
if hasattr(obj, 'graph'):
assert obj.graph in graphs
else:
pass
#log.warn("op_indirect_call with graphs=None:", f)
return self.op_direct_call(f, *args)
def op_malloc(self, obj, flags):
flavor = flags['flavor']
zero = flags.get('zero', False)
track_allocation = flags.get('track_allocation', True)
if flavor == "stack":
result = self.heap.malloc(obj, zero=zero, flavor='raw')
self.alloca_objects.append(result)
return result
ptr = self.heap.malloc(obj, zero=zero, flavor=flavor,
track_allocation=track_allocation)
return ptr
def op_malloc_varsize(self, obj, flags, size):
flavor = flags['flavor']
zero = flags.get('zero', False)
track_allocation = flags.get('track_allocation', True)
assert flavor in ('gc', 'raw')
try:
ptr = self.heap.malloc(obj, size, zero=zero, flavor=flavor,
track_allocation=track_allocation)
return ptr
except MemoryError:
self.make_llexception()
def op_malloc_nonmovable(self, TYPE, flags):
flavor = flags['flavor']
assert flavor == 'gc'
zero = flags.get('zero', False)
return self.heap.malloc_nonmovable(TYPE, zero=zero)
def op_malloc_nonmovable_varsize(self, TYPE, flags, size):
flavor = flags['flavor']
assert flavor == 'gc'
zero = flags.get('zero', False)
return self.heap.malloc_nonmovable(TYPE, size, zero=zero)
def op_free(self, obj, flags):
assert flags['flavor'] == 'raw'
track_allocation = flags.get('track_allocation', True)
self.heap.free(obj, flavor='raw', track_allocation=track_allocation)
def op_gc_add_memory_pressure(self, size):
self.heap.add_memory_pressure(size)
def op_shrink_array(self, obj, smallersize):
return self.heap.shrink_array(obj, smallersize)
def op_zero_gc_pointers_inside(self, obj):
raise NotImplementedError("zero_gc_pointers_inside")
def op_gc_writebarrier_before_copy(self, source, dest,
source_start, dest_start, length):
if hasattr(self.heap, 'writebarrier_before_copy'):
return self.heap.writebarrier_before_copy(source, dest,
source_start, dest_start,
length)
else:
return True
def op_getfield(self, obj, field):
checkptr(obj)
# check the difference between op_getfield and op_getsubstruct:
assert not isinstance(getattr(lltype.typeOf(obj).TO, field),
lltype.ContainerType)
return getattr(obj, field)
def op_force_cast(self, RESTYPE, obj):
from pypy.rpython.lltypesystem import ll2ctypes
return ll2ctypes.force_cast(RESTYPE, obj)
op_force_cast.need_result_type = True
def op_cast_int_to_ptr(self, RESTYPE, int1):
return lltype.cast_int_to_ptr(RESTYPE, int1)
op_cast_int_to_ptr.need_result_type = True
def op_cast_ptr_to_int(self, ptr1):
checkptr(ptr1)
return lltype.cast_ptr_to_int(ptr1)
def op_cast_opaque_ptr(self, RESTYPE, obj):
checkptr(obj)
return lltype.cast_opaque_ptr(RESTYPE, obj)
op_cast_opaque_ptr.need_result_type = True
def op_cast_ptr_to_adr(self, ptr):
checkptr(ptr)
return llmemory.cast_ptr_to_adr(ptr)
def op_cast_adr_to_int(self, adr, mode):
checkadr(adr)
return llmemory.cast_adr_to_int(adr, mode)
def op_convert_float_bytes_to_longlong(self, f):
from pypy.rlib import longlong2float
return longlong2float.float2longlong(f)
def op_weakref_create(self, v_obj):
def objgetter(): # special support for gcwrapper.py
return self.getval(v_obj)
return self.heap.weakref_create_getlazy(objgetter)
op_weakref_create.specialform = True
def op_weakref_deref(self, PTRTYPE, obj):
return self.heap.weakref_deref(PTRTYPE, obj)
op_weakref_deref.need_result_type = True
def op_cast_ptr_to_weakrefptr(self, obj):
return llmemory.cast_ptr_to_weakrefptr(obj)
def op_cast_weakrefptr_to_ptr(self, PTRTYPE, obj):
return llmemory.cast_weakrefptr_to_ptr(PTRTYPE, obj)
op_cast_weakrefptr_to_ptr.need_result_type = True
def op_gc__collect(self, *gen):
self.heap.collect(*gen)
def op_gc_heap_stats(self):
raise NotImplementedError
def op_gc_obtain_free_space(self, size):
raise NotImplementedError
def op_gc_can_move(self, ptr):
addr = llmemory.cast_ptr_to_adr(ptr)
return self.heap.can_move(addr)
def op_gc_thread_prepare(self):
self.heap.thread_prepare()
def op_gc_thread_run(self):
self.heap.thread_run()
def op_gc_thread_start(self):
self.heap.thread_start()
def op_gc_thread_die(self):
self.heap.thread_die()
def op_gc_thread_before_fork(self):
raise NotImplementedError
def op_gc_thread_after_fork(self):
raise NotImplementedError
def op_gc_free(self, addr):
# what can you do?
pass
#raise NotImplementedError("gc_free")
def op_gc_fetch_exception(self):
raise NotImplementedError("gc_fetch_exception")
def op_gc_restore_exception(self, exc):
raise NotImplementedError("gc_restore_exception")
def op_gc_adr_of_nursery_top(self):
raise NotImplementedError
def op_gc_adr_of_nursery_free(self):
raise NotImplementedError
def op_gc_adr_of_root_stack_base(self):
raise NotImplementedError
def op_gc_adr_of_root_stack_top(self):
raise NotImplementedError
def op_gc_call_rtti_destructor(self, rtti, addr):
if hasattr(rtti._obj, 'destructor_funcptr'):
d = rtti._obj.destructor_funcptr
obptr = addr.ref()
return self.op_direct_call(d, obptr)
def op_gc_deallocate(self, TYPE, addr):
raise NotImplementedError("gc_deallocate")
def op_gc_push_alive_pyobj(self, pyobj):
raise NotImplementedError("gc_push_alive_pyobj")
def op_gc_pop_alive_pyobj(self, pyobj):
raise NotImplementedError("gc_pop_alive_pyobj")
def op_gc_reload_possibly_moved(self, v_newaddr, v_ptr):
assert v_newaddr.concretetype is llmemory.Address
assert isinstance(v_ptr.concretetype, lltype.Ptr)
assert v_ptr.concretetype.TO._gckind == 'gc'
newaddr = self.getval(v_newaddr)
p = llmemory.cast_adr_to_ptr(newaddr, v_ptr.concretetype)
if isinstance(v_ptr, Constant):
assert v_ptr.value == p
else:
self.setvar(v_ptr, p)
op_gc_reload_possibly_moved.specialform = True
def op_gc_identityhash(self, obj):
return lltype.identityhash(obj)
def op_gc_id(self, ptr):
PTR = lltype.typeOf(ptr)
if isinstance(PTR, lltype.Ptr):
return self.heap.gc_id(ptr)
elif isinstance(PTR, ootype.OOType):
return ootype.identityhash(ptr) # XXX imprecise
raise NotImplementedError("gc_id on %r" % (PTR,))
def op_gc_set_max_heap_size(self, maxsize):
raise NotImplementedError("gc_set_max_heap_size")
def op_gc_asmgcroot_static(self, index):
raise NotImplementedError("gc_asmgcroot_static")
def op_gc_stack_bottom(self):
pass # marker for trackgcroot.py
def op_gc_shadowstackref_new(self): # stacklet+shadowstack
raise NotImplementedError("gc_shadowstackref_new")
def op_gc_shadowstackref_context(self):
raise NotImplementedError("gc_shadowstackref_context")
def op_gc_shadowstackref_destroy(self):
raise NotImplementedError("gc_shadowstackref_destroy")
def op_gc_save_current_state_away(self):
raise NotImplementedError("gc_save_current_state_away")
def op_gc_forget_current_state(self):
raise NotImplementedError("gc_forget_current_state")
def op_gc_restore_state_from(self):
raise NotImplementedError("gc_restore_state_from")
def op_gc_start_fresh_new_state(self):
raise NotImplementedError("gc_start_fresh_new_state")
def op_gc_get_type_info_group(self):
raise NotImplementedError("gc_get_type_info_group")
def op_gc_get_rpy_memory_usage(self):
raise NotImplementedError("gc_get_rpy_memory_usage")
def op_gc_get_rpy_roots(self):
raise NotImplementedError("gc_get_rpy_roots")
def op_gc_get_rpy_referents(self):
raise NotImplementedError("gc_get_rpy_referents")
def op_gc_is_rpy_instance(self):
raise NotImplementedError("gc_is_rpy_instance")
def op_gc_get_rpy_type_index(self):
raise NotImplementedError("gc_get_rpy_type_index")
def op_gc_dump_rpy_heap(self):
raise NotImplementedError("gc_dump_rpy_heap")
def op_gc_typeids_z(self):
raise NotImplementedError("gc_typeids_z")
def op_do_malloc_fixedsize_clear(self):
raise NotImplementedError("do_malloc_fixedsize_clear")
def op_do_malloc_varsize_clear(self):
raise NotImplementedError("do_malloc_varsize_clear")
def op_get_write_barrier_failing_case(self):
raise NotImplementedError("get_write_barrier_failing_case")
def op_get_write_barrier_from_array_failing_case(self):
raise NotImplementedError("get_write_barrier_from_array_failing_case")
def op_stack_current(self):
return 0
# operations on pyobjects!
for opname in lloperation.opimpls.keys():
exec py.code.Source("""
def op_%(opname)s(self, *pyobjs):
for pyo in pyobjs:
assert lltype.typeOf(pyo) == lltype.Ptr(lltype.PyObject)
func = lloperation.opimpls[%(opname)r]
try:
pyo = func(*[pyo._obj.value for pyo in pyobjs])
except Exception:
self.make_llexception()
return self.heap.pyobjectptr(pyo)
""" % locals()).compile()
del opname
def op_simple_call(self, f, *args):
assert lltype.typeOf(f) == lltype.Ptr(lltype.PyObject)
for pyo in args:
assert lltype.typeOf(pyo) == lltype.Ptr(lltype.PyObject)
res = f._obj.value(*[pyo._obj.value for pyo in args])
return self.heap.pyobjectptr(res)
# __________________________________________________________
# operations on addresses
def op_raw_malloc(self, size):
assert lltype.typeOf(size) == lltype.Signed
return llmemory.raw_malloc(size)
op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc
def op_boehm_register_finalizer(self, p, finalizer):
pass
def op_boehm_disappearing_link(self, link, obj):
pass
def op_raw_malloc_usage(self, size):
assert lltype.typeOf(size) == lltype.Signed
return llmemory.raw_malloc_usage(size)
def op_raw_free(self, addr):
checkadr(addr)
llmemory.raw_free(addr)
def op_raw_memclear(self, addr, size):
checkadr(addr)
llmemory.raw_memclear(addr, size)
def op_raw_memcopy(self, fromaddr, toaddr, size):
checkadr(fromaddr)
checkadr(toaddr)
llmemory.raw_memcopy(fromaddr, toaddr, size)
op_raw_memmove = op_raw_memcopy # this is essentially the same here
def op_raw_load(self, addr, typ, offset):
checkadr(addr)
value = getattr(addr, str(typ).lower())[offset]
assert lltype.typeOf(value) == typ
return value
def op_raw_store(self, addr, typ, offset, value):
checkadr(addr)
assert lltype.typeOf(value) == typ
getattr(addr, str(typ).lower())[offset] = value
def op_stack_malloc(self, size): # mmh
raise NotImplementedError("backend only")
def op_track_alloc_start(self, addr):
# we don't do tracking at this level
checkadr(addr)
def op_track_alloc_stop(self, addr):
checkadr(addr)
# ____________________________________________________________
# Overflow-detecting variants
def op_int_neg_ovf(self, x):
assert is_valid_int(x)
try:
return ovfcheck(-x)
except OverflowError:
self.make_llexception()
def op_int_abs_ovf(self, x):
assert is_valid_int(x)
try:
return ovfcheck(abs(x))
except OverflowError:
self.make_llexception()
def op_int_lshift_ovf(self, x, y):
assert is_valid_int(x)
assert is_valid_int(y)
try:
return ovfcheck(x << y)
except OverflowError:
self.make_llexception()
def _makefunc2(fn, operator, xtype, ytype=None):
import sys
d = sys._getframe(1).f_locals
if ytype is None:
ytype = xtype
if '_ovf' in fn:
checkfn = 'ovfcheck'
elif fn.startswith('op_int_'):
checkfn = 'intmask'
else:
checkfn = ''
if operator == '//':
code = '''r = %(checkfn)s(x // y)
if x^y < 0 and x%%y != 0:
r += 1
return r
'''%locals()
elif operator == '%':
## overflow check on % does not work with emulated int
code = '''%(checkfn)s(x // y)
r = x %% y
if x^y < 0 and x%%y != 0:
r -= y
return r
'''%locals()
else:
code = 'return %(checkfn)s(x %(operator)s y)'%locals()
exec py.code.Source("""
def %(fn)s(self, x, y):
assert isinstance(x, %(xtype)s)
assert isinstance(y, %(ytype)s)
try:
%(code)s
except (OverflowError, ValueError, ZeroDivisionError):
self.make_llexception()
""" % locals()).compile() in globals(), d
_makefunc2('op_int_add_ovf', '+', '(int, long, llmemory.AddressOffset)')
_makefunc2('op_int_mul_ovf', '*', '(int, long, llmemory.AddressOffset)', '(int, long)')
_makefunc2('op_int_sub_ovf', '-', '(int, long)')
_makefunc2('op_int_floordiv_ovf', '//', '(int, long)') # XXX negative args
_makefunc2('op_int_floordiv_zer', '//', '(int, long)') # can get off-by-one
_makefunc2('op_int_floordiv_ovf_zer', '//', '(int, long)') # (see op_int_floordiv)
_makefunc2('op_int_mod_ovf', '%', '(int, long)')
_makefunc2('op_int_mod_zer', '%', '(int, long)')
_makefunc2('op_int_mod_ovf_zer', '%', '(int, long)')
_makefunc2('op_uint_floordiv_zer', '//', 'r_uint')
_makefunc2('op_uint_mod_zer', '%', 'r_uint')
_makefunc2('op_llong_floordiv_zer', '//', 'r_longlong')
_makefunc2('op_llong_mod_zer', '%', 'r_longlong')
_makefunc2('op_ullong_floordiv_zer', '//', 'r_ulonglong')
_makefunc2('op_ullong_mod_zer', '%', 'r_ulonglong')
def op_int_add_nonneg_ovf(self, x, y):
if isinstance(y, int):
assert y >= 0
return self.op_int_add_ovf(x, y)
def op_int_is_true(self, x):
# special case
if type(x) is CDefinedIntSymbolic:
x = x.default
# if type(x) is a subclass of Symbolic, bool(x) will usually raise
# a TypeError -- unless __nonzero__ has been explicitly overridden.
assert is_valid_int(x) or isinstance(x, Symbolic)
return bool(x)
# hack for jit.codegen.llgraph
def op_check_and_clear_exc(self):
exc_data = self.llinterpreter.get_transformed_exc_data(self.graph)
assert exc_data
etype = exc_data.exc_type
evalue = exc_data.exc_value
exc_data.exc_type = lltype.typeOf(etype )._defl()
exc_data.exc_value = lltype.typeOf(evalue)._defl()
return bool(etype)
#Operation of ootype
def op_new(self, INST):
assert isinstance(INST, (ootype.Instance, ootype.BuiltinType))
return ootype.new(INST)
def op_oonewarray(self, ARRAY, length):
assert isinstance(ARRAY, ootype.Array)
assert is_valid_int(length)
return ootype.oonewarray(ARRAY, length)
def op_runtimenew(self, class_):
return ootype.runtimenew(class_)
def op_oonewcustomdict(self, DICT, eq_func, eq_obj, eq_method_name,
hash_func, hash_obj, hash_method_name):
eq_name, interp_eq = \
wrap_callable(self.llinterpreter, eq_func, eq_obj, eq_method_name)
EQ_FUNC = ootype.StaticMethod([DICT._KEYTYPE, DICT._KEYTYPE], ootype.Bool)
sm_eq = ootype.static_meth(EQ_FUNC, eq_name, _callable=interp_eq)
hash_name, interp_hash = \
wrap_callable(self.llinterpreter, hash_func, hash_obj, hash_method_name)
HASH_FUNC = ootype.StaticMethod([DICT._KEYTYPE], ootype.Signed)
sm_hash = ootype.static_meth(HASH_FUNC, hash_name, _callable=interp_hash)
# XXX: is it fine to have StaticMethod type for bound methods, too?
return ootype.oonewcustomdict(DICT, sm_eq, sm_hash)
def op_oosetfield(self, inst, name, value):
checkinst(inst)
assert isinstance(name, str)
FIELDTYPE = lltype.typeOf(inst)._field_type(name)
if FIELDTYPE is not lltype.Void:
setattr(inst, name, value)
def op_oogetfield(self, inst, name):
checkinst(inst)
assert isinstance(name, str)
return getattr(inst, name)
def op_oosend(self, message, inst, *args):
checkinst(inst)
assert isinstance(message, str)
bm = getattr(inst, message)
inst = bm.inst
m = bm.meth
args = m._checkargs(args, check_callable=False)
if getattr(m, 'abstract', False):
raise RuntimeError("calling abstract method %r" % (m,))
return self.perform_call(m, (lltype.typeOf(inst),)+lltype.typeOf(m).ARGS, [inst]+args)
def op_oostring(self, obj, base):
return ootype.oostring(obj, base)
def op_oounicode(self, obj, base):
try:
return ootype.oounicode(obj, base)
except UnicodeDecodeError:
self.make_llexception()
def op_ooparse_int(self, s, base):
try:
return ootype.ooparse_int(s, base)
except ValueError:
self.make_llexception()
def op_ooparse_float(self, s):
try:
return ootype.ooparse_float(s)
except ValueError:
self.make_llexception()
def op_oobox_int(self, i):
return ootype.oobox_int(i)
def op_oounbox_int(self, x):
return ootype.oounbox_int(x)
class Tracer(object):
Counter = 0
file = None
TRACE = int(os.getenv('PYPY_TRACE') or '0')
HEADER = """<html><head>
<script language=javascript type='text/javascript'>
function togglestate(n) {
var item = document.getElementById('div'+n)
if (item.style.display == 'none')
item.style.display = 'block';
else
item.style.display = 'none';
}
function toggleall(lst) {
for (var i = 0; i<lst.length; i++) {
togglestate(lst[i]);
}
}
</script>
</head>
<body><pre>
"""
FOOTER = """</pre>
<script language=javascript type='text/javascript'>
toggleall(%r);
</script>
</body></html>"""
ENTER = ('''\n\t<a href="javascript:togglestate(%d)">%s</a>'''
'''\n<div id="div%d" style="display: %s">\t''')
LEAVE = '''\n</div>\t'''
def htmlquote(self, s, text_to_html={}):
# HTML quoting, lazily initialized
if not text_to_html:
import htmlentitydefs
for key, value in htmlentitydefs.entitydefs.items():
text_to_html[value] = '&' + key + ';'
return ''.join([text_to_html.get(c, c) for c in s])
def start(self):
# start of a dump file
if not self.TRACE:
return
from pypy.tool.udir import udir
n = Tracer.Counter
Tracer.Counter += 1
filename = 'llinterp_trace_%d.html' % n
self.file = udir.join(filename).open('w')
print >> self.file, self.HEADER
linkname = str(udir.join('llinterp_trace.html'))
try:
os.unlink(linkname)
except OSError:
pass
try:
os.symlink(filename, linkname)
except (AttributeError, OSError):
pass
self.count = 0
self.indentation = ''
self.depth = 0
self.latest_call_chain = []
def stop(self):
# end of a dump file
if self.file:
print >> self.file, self.FOOTER % (self.latest_call_chain[1:])
self.file.close()
self.file = None
def enter(self, graph):
# enter evaluation of a graph
if self.file:
del self.latest_call_chain[self.depth:]
self.depth += 1
self.latest_call_chain.append(self.count)
s = self.htmlquote(str(graph))
i = s.rfind(')')
s = s[:i+1] + '<b>' + s[i+1:] + '</b>'
if self.count == 0:
display = 'block'
else:
display = 'none'
text = self.ENTER % (self.count, s, self.count, display)
self.indentation += ' '
self.file.write(text.replace('\t', self.indentation))
self.count += 1
def leave(self):
# leave evaluation of a graph
if self.file:
self.indentation = self.indentation[:-4]
self.file.write(self.LEAVE.replace('\t', self.indentation))
self.depth -= 1
def dump(self, text, bold=False):
if self.file:
text = self.htmlquote(text)
if bold:
text = '<b>%s</b>' % (text,)
self.file.write(text.replace('\n', '\n'+self.indentation))
def flush(self):
if self.file:
self.file.flush()
def wrap_callable(llinterpreter, fn, obj, method_name):
if method_name is None:
# fn is a StaticMethod
if obj is not None:
self_arg = [obj]
else:
self_arg = []
func_graph = fn.graph
else:
# obj is an instance, we want to call 'method_name' on it
assert fn is None
self_arg = [obj]
func_graph = obj._TYPE._methods[method_name._str].graph
return wrap_graph(llinterpreter, func_graph, self_arg)
def wrap_graph(llinterpreter, graph, self_arg):
"""
    Returns a callable that interprets the given func or method_name when called.
"""
def interp_func(*args):
graph_args = self_arg + list(args)
return llinterpreter.eval_graph(graph, args=graph_args)
interp_func.graph = graph
interp_func.self_arg = self_arg
return graph.name, interp_func
def enumerate_exceptions_top_down():
import exceptions
result = []
seen = {}
def addcls(cls):
if (type(cls) is type(Exception) and
issubclass(cls, py.builtin.BaseException)):
if cls in seen:
return
for base in cls.__bases__: # bases first
addcls(base)
result.append(cls)
seen[cls] = True
for cls in exceptions.__dict__.values():
addcls(cls)
return result
class _address_of_local_var(object):
_TYPE = llmemory.Address
def __init__(self, frame, v):
self._frame = frame
self._v = v
def _getaddress(self):
return _address_of_local_var_accessor(self._frame, self._v)
address = property(_getaddress)
class _address_of_local_var_accessor(object):
def __init__(self, frame, v):
self.frame = frame
self.v = v
def __getitem__(self, index):
if index != 0:
raise IndexError("address of local vars only support [0] indexing")
p = self.frame.getval(self.v)
result = llmemory.cast_ptr_to_adr(p)
# the GC should never see instances of _gctransformed_wref
result = self.unwrap_possible_weakref(result)
return result
def __setitem__(self, index, newvalue):
if index != 0:
raise IndexError("address of local vars only support [0] indexing")
if self.v.concretetype == llmemory.WeakRefPtr:
# fish some more
assert isinstance(newvalue, llmemory.fakeaddress)
p = llmemory.cast_ptr_to_weakrefptr(newvalue.ptr)
else:
p = llmemory.cast_adr_to_ptr(newvalue, self.v.concretetype)
self.frame.setvar(self.v, p)
def unwrap_possible_weakref(self, addr):
# fish fish fish
if addr and isinstance(addr.ptr._obj, llmemory._gctransformed_wref):
return llmemory.fakeaddress(addr.ptr._obj._ptr)
return addr
# by default we route all logging messages to nothingness
# e.g. tests can then switch on logging to get more help
# for failing tests
from pypy.tool.ansi_print import ansi_log
py.log.setconsumer('llinterp', ansi_log)
|
py | b40a14b0d3a8942e06e9b443da4b0a61ea63abb9 | '''
======================
Queue ordering options
======================
Using a sequence of wav files, we can demonstrate the various ordering options.
For all demonstrations, we assume that three trials each of six stimuli (A, B, C,
D, E, and F) have been queued.
'''
import textwrap
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from psiaudio.stim import wavs_from_path
from psiaudio.queue import (BlockedRandomSignalQueue, BlockedFIFOSignalQueue,
GroupedFIFOSignalQueue, FIFOSignalQueue)
###############################################################################
# First, let's load the wav files. A utility function is provided that scans a
# particular folder for all wav files and returns a list of ``WavFile``
# instances (i.e., a subclass of ``Waveform``). Queues require all stimuli
# to be a subclass of ``Waveform``.
fs = 100e3
base_path = '../wav-files'
wavfiles = wavs_from_path(fs, base_path)
# Plot each waveform to illustrate what the individual stimuli look like.
figure, axes = plt.subplots(2, 3, figsize=(10, 6), sharex=True, sharey=True)
for ax, w in zip(axes.flat, wavfiles):
w.reset()
waveform = w.get_samples_remaining()
t = np.arange(waveform.shape[-1]) / w.fs
ax.plot(t, waveform)
title = textwrap.fill(w.filename.stem, 20)
ax.set_title(title)
for ax in axes[:, 0]:
ax.set_ylabel('Signal (V)')
for ax in axes[-1]:
ax.set_xlabel('Time (sec)')
figure.tight_layout()
###############################################################################
# Now, calculate how many samples we want to pull out of the queue on each call
# to ``AbstractSignalQueue.pop_buffer``.
n_samples = sum(w.n_samples() for w in wavfiles)
###############################################################################
# We also create a utility function to plot the queue contents. This function
# calls ``queue.pop_buffer`` four times and plots the result. These samples can
# be used, for example, to "feed" the portaudio output buffer which has a
# callback that requests a fresh number of samples at a fixed interval. Note
# that the final call returns a sequence of zeros since we have presented the
# requested number of trials for each stimulus.
def plot_queue(queue, n_samples):
t = np.arange(n_samples) / queue.fs
figure, axes = plt.subplots(4, 1, figsize=(10, 10), sharex=True,
sharey=True)
for i, ax in enumerate(axes.flat):
waveform = queue.pop_buffer(n_samples)
ax.plot(t, waveform)
ax.set_title(f'Call {i+1}')
ax.set_ylabel('Signal')
axes[-1].set_xlabel('Time (sec)')
figure.tight_layout()
###############################################################################
# The most basic queue is ``FIFOSignalQueue``. The first stimulus is presented
# for the specified number of trials before advancing to the next stimuli. The
# ordering of the stimuli will be:
#
# A A A B B B C C C D D D E E E F F F
queue = FIFOSignalQueue(fs)
queue.extend(wavfiles, trials=3)
plot_queue(queue, n_samples)
###############################################################################
# The next type of queue is ``BlockedFIFOSignalQueue``. The stimuli are
# interleaved (in the order they were queued). All stimuli are presented before
# advancing to the next trial.
#
# A B C D E F A B C D E F A B C D E F
queue = BlockedFIFOSignalQueue(fs)
queue.extend(wavfiles, 3)
plot_queue(queue, n_samples)
###############################################################################
# To modify the block size, use ``GroupedFIFOSignalQueue``. As with
# ``BlockedFIFOSignalQueue``, stimuli are presented in groups, but you can
# manually set the group size to create sub-blocks that are presented before
# advancing to the next sub-block.
# In the following example, the group size is 3, creating two sub-blocks:
#
# A B C A B C A B C D E F D E F D E F
queue = GroupedFIFOSignalQueue(group_size=3, fs=fs)
queue.extend(wavfiles, 3)
plot_queue(queue, n_samples)
###############################################################################
# We can also randomize stimuli within each block using
# ``BlockedRandomSignalQueue``.
queue = BlockedRandomSignalQueue(fs)
queue.extend(wavfiles, 3)
plot_queue(queue, n_samples)
plt.show()
|
py | b40a14d5bcae9e585371afad5f1a9d856f824007 | """
send coroutine testing.
"""
import pytest
from aiosmtplib import send
pytestmark = pytest.mark.asyncio()
async def test_send(hostname, smtpd_server_port, message, received_messages):
errors, response = await send(message, hostname=hostname, port=smtpd_server_port)
assert not errors
assert len(received_messages) == 1
async def test_send_with_str(hostname, smtpd_server_port, message, received_messages):
errors, response = await send(
str(message),
hostname=hostname,
port=smtpd_server_port,
sender=message["From"],
recipients=[message["To"]],
)
assert not errors
assert len(received_messages) == 1
async def test_send_with_bytes(hostname, smtpd_server_port, message, received_messages):
errors, response = await send(
bytes(message),
hostname=hostname,
port=smtpd_server_port,
sender=message["From"],
recipients=[message["To"]],
)
assert not errors
assert len(received_messages) == 1
async def test_send_without_sender(
hostname, smtpd_server_port, message, received_messages
):
with pytest.raises(ValueError):
errors, response = await send(
bytes(message),
hostname=hostname,
port=smtpd_server_port,
sender=None,
recipients=[message["To"]],
)
async def test_send_without_recipients(
hostname, smtpd_server_port, message, received_messages
):
with pytest.raises(ValueError):
errors, response = await send(
bytes(message),
hostname=hostname,
port=smtpd_server_port,
sender=message["From"],
recipients=[],
)
async def test_send_with_start_tls(
hostname, smtpd_server_port, message, received_messages, received_commands
):
errors, response = await send(
message,
hostname=hostname,
port=smtpd_server_port,
start_tls=True,
validate_certs=False,
)
assert not errors
assert "STARTTLS" in [command[0] for command in received_commands]
assert len(received_messages) == 1
async def test_send_with_login(
hostname, smtpd_server_port, message, received_messages, received_commands
):
errors, response = await send( # nosec
message,
hostname=hostname,
port=smtpd_server_port,
start_tls=True,
validate_certs=False,
username="test",
password="test",
)
assert not errors
assert "AUTH" in [command[0] for command in received_commands]
assert len(received_messages) == 1
|
py | b40a174b48e53b8c0c7265bf51a1bcb205e7adab | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests that our version shims in backward.py are working."""
from coverage.backward import iitems, binary_bytes, bytes_to_ints
from tests.coveragetest import CoverageTest
from tests.helpers import assert_count_equal
class BackwardTest(CoverageTest):
"""Tests of things from backward.py."""
def test_iitems(self):
d = {'a': 1, 'b': 2, 'c': 3}
items = [('a', 1), ('b', 2), ('c', 3)]
assert_count_equal(list(iitems(d)), items)
def test_binary_bytes(self):
byte_values = [0, 255, 17, 23, 42, 57]
bb = binary_bytes(byte_values)
assert len(bb) == len(byte_values)
assert byte_values == list(bytes_to_ints(bb))
|
py | b40a1789a1ba6c5a14f4fbd2691861d7d2c658d3 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: Nicolas P. Rougier
# Date: 04/03/2014
# -----------------------------------------------------------------------------
import numpy as np
from vispy import gloo, app
from vispy.gloo import Program, VertexBuffer, IndexBuffer
from vispy.util.transforms import perspective, translate, rotate
from vispy.geometry import create_cube
vertex = """
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform sampler2D texture;
attribute vec3 position;
attribute vec2 texcoord;
attribute vec3 normal;
attribute vec4 color;
varying vec2 v_texcoord;
void main()
{
gl_Position = projection * view * model * vec4(position,1.0);
v_texcoord = texcoord;
}
"""
fragment = """
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
gl_FragColor = texture2D(texture, v_texcoord);
}
"""
def checkerboard(grid_num=8, grid_size=32):
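    """Return a (grid_num*grid_size) x (grid_num*grid_size) uint8 checkerboard
    texture with alternating 0/255 cells; used below as the cube's texture."""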
row_even = grid_num // 2 * [0, 1]
row_odd = grid_num // 2 * [1, 0]
Z = np.row_stack(grid_num // 2 * (row_even, row_odd)).astype(np.uint8)
return 255 * Z.repeat(grid_size, axis=0).repeat(grid_size, axis=1)
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, size=(512, 512), title='Textured cube',
keys='interactive')
self.timer = app.Timer('auto', self.on_timer)
def on_initialize(self, event):
# Build cube data
V, I, _ = create_cube()
vertices = VertexBuffer(V)
self.indices = IndexBuffer(I)
# Build program
self.program = Program(vertex, fragment)
self.program.bind(vertices)
# Build view, model, projection & normal
view = np.eye(4, dtype=np.float32)
model = np.eye(4, dtype=np.float32)
translate(view, 0, 0, -5)
self.program['model'] = model
self.program['view'] = view
self.program['texture'] = checkerboard()
self.phi, self.theta = 0, 0
        # OpenGL initialization
gloo.set_state(clear_color=(0.30, 0.30, 0.35, 1.00), depth_test=True)
self.timer.start()
def on_draw(self, event):
gloo.clear(color=True, depth=True)
self.program.draw('triangles', self.indices)
def on_resize(self, event):
gloo.set_viewport(0, 0, *event.size)
projection = perspective(45.0, event.size[0] / float(event.size[1]),
2.0, 10.0)
self.program['projection'] = projection
def on_timer(self, event):
self.theta += .5
self.phi += .5
model = np.eye(4, dtype=np.float32)
rotate(model, self.theta, 0, 0, 1)
rotate(model, self.phi, 0, 1, 0)
self.program['model'] = model
self.update()
if __name__ == '__main__':
c = Canvas()
c.show()
app.run()
|
py | b40a17ae709c191375e78f2f93769bc418c5d57a | """
This file offers the methods to automatically retrieve the graph Faecalicoccus pleomorphus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def FaecalicoccusPleomorphus(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Faecalicoccus pleomorphus graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Faecalicoccus pleomorphus graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="FaecalicoccusPleomorphus",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
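# Usage sketch (illustrative; assumes network access, since the graph is
# downloaded and cached on the first call):
#   graph = FaecalicoccusPleomorphus(version="links.v11.5")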
|
py | b40a17f446a25f88346213dac3b65a38b1a513a4 | # -*- coding: utf-8 -*-
import json
import logging
import re
from urllib.parse import unquote
from scrapcore.parser.parser import Parser
logger = logging.getLogger(__name__)
class GoogleParser(Parser):
"""Parses SERP pages of the Google search engine."""
search_engine = 'google'
search_types = ['normal', 'image']
effective_query_selector = [
'#topstuff .med > b::text', '.med > a > b::text'
]
no_results_selector = []
num_results_search_selectors = ['#resultStats']
page_number_selectors = ['div#foot div#navcnt td.cur::text']
normal_search_selectors = {
'results': {
'items': {
'container': '#center_col',
'result_container': 'div.g',
'link': 'div.r > a:first-of-type::attr(href)',
'snippet': 'div.s span.st::text',
'title': 'div.r > a:first-of-type::text',
'visible_link': 'cite::text',
'rating': 'div.f.slp::text',
'sitelinks': 'div.osl::text'
},
},
'videos': {
'video_items': {
'container': 'g-inner-card',
'result_container': 'div.y8AWGd',
'link': 'div.y8AWGd a::attr(href)',
'snippet': 'div.y8AWGd::text',
'title': 'div.y8AWGd::text',
'visible_link': 'div.y8AWGd cite::text',
'rating': 'div.osl a:first-of-type::text',
'sitelinks': 'div.osl::text'
},
},
'news': {
'news_items': {
'container': 'g-scrolling-carousel',
'result_container': 'div.So9e7d',
'link': 'div.So9e7d a::attr(href)',
'snippet': 'div.So9e7d::text',
'title': 'div.So9e7d div.Igo7ld::text',
'visible_link': 'div.So9e7d cite::text',
'rating': 'div.osl a:first-of-type::text',
'sitelinks': 'div.osl::text'
},
},
'shopping': {
'shopping_items_main': {
'container': 'div.top-pla-group-inner',
'result_container': 'div.mnr-c.pla-unit',
'link': 'div.mnr-c.pla-unit a.pla-unit-title-link::attr(href)',
'snippet': 'div.mnr-c.pla-unit::text',
'title': 'div.mnr-c.pla-unit a.pla-unit-title-link > span::text',
'visible_link': 'a.FfKHB::attr(href)',
'rating': 'xxxx',
'sitelinks': 'a.FfKHB > span::text'
},
'shopping_items_side': {
'container': 'div.cu-container',
'result_container': 'div.mnr-c.pla-unit',
'link': 'div.mnr-c.pla-unit a.pla-unit-title-link::attr(href)',
'snippet': 'div.mnr-c.pla-unit::text',
'title': 'div.mnr-c.pla-unit a.pla-unit-title-link > span::text',
'visible_link': 'a.FfKHB::attr(href)',
'rating': 'xxxx',
'sitelinks': 'a.FfKHB > span::text'
},
},
'ads_main': {
'ads_item': {
'container': '#center_col',
'result_container': '.ads-ad',
'link': 'div.ad_cclk > a:nth-child(2)::attr(href)',
'snippet': 'div.ads-creative::text',
'title': 'div.ad_cclk > a:nth-child(2)::text',
'visible_link': '.ads-visurl cite::text',
'rating': 'div.xyt0c span::text',
'sitelinks': 'ul.OkkX2d::text'
}
},
'related_keywords': {
'related_items': {
'container': 'div.card-section',
'result_container': 'p.nVcaUb',
'keyword': 'a::text'
}
}
}
image_search_selectors = {
'image': {
'de_ip': {
'container': '#isr_mc div.rg_di',
# 'result_container': 'div.rg_di',
'snippet': 'div.rg_di > div.rg_meta',
'link': 'a.rg_l::attr(href)'
},
}
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def after_parsing(self):
"""Clean the urls.
        A typical scraped result looks like the following:
'/url?q=http://www.youtube.com/user/Apple&sa=U&ei=\
lntiVN7JDsTfPZCMgKAO&ved=0CFQQFjAO&usg=AFQjCNGkX65O-hKLmyq1FX9HQqbb9iYn9A'
Clean with a short regex.
"""
super().after_parsing()
if self.searchtype == 'normal':
if self.num_results > 0:
self.no_results = False
elif self.num_results <= 0:
self.no_results = True
if 'No results found for' in \
self.html or 'did not match any documents' in self.html:
self.no_results = True
# finally try in the snippets
if self.no_results is True:
for key, i in self.iter_serp_items():
if 'snippet' in self.search_results[key][i] and self.query:
if self.query.replace('"', '') in \
self.search_results[key][i]['snippet']:
self.no_results = False
if self.searchtype == 'image':
for key, i in self.iter_serp_items():
if self.search_results[key][i]:
meta_dict = json.loads(self.search_results[key][i]['snippet'])
rank = self.search_results[key][i]['rank']
# logger.info(meta_dict)
self.search_results[key][i] = {
'link': meta_dict['ou'],
'snippet': meta_dict['s'],
'title': meta_dict['pt'],
'visible_link': meta_dict['isu'],
'rating': None,
'sitelinks': None,
'rank': rank
}
clean_regexes = {
'normal': r'/url\?q=(?P<url>.*?)&sa=U&ei=',
'image': r'imgres\?imgurl=(?P<url>.*?)&'
}
for key, i in self.iter_serp_items():
result = re.search(
clean_regexes[self.searchtype],
self.search_results[key][i]['link']
)
if result:
self.search_results[key][i]['link'] = unquote(
result.group('url')
)
|
py | b40a185083c258351b898a29e6dc05bde1e09248 | def reverse(n):
str_n=str(n)
return int(str_n[::-1])
n=int(input("Enter number to be reversed:"))
print(reverse(n))
|
py | b40a19b66aa8e41e2bcf827a544d4e8b81445fa0 | """
scaffoldgraph.analysis.representation
Module contains general functions for scaffold analysis
"""
def get_virtual_scaffolds(scaffoldgraph, data=False, default=None):
"""Get 'virtual' scaffolds within a scaffold graph.
Virtual scaffolds represent scaffolds that are not directly obtained from
any molecule of the collection, but generated by the pruning process.
Virtual scaffolds may provide promising starting points for the synthesis
or acquisition of compounds complementing the current collection.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query
data : str, bool, optional
The scaffold node attribute returned in 2-tuple (n, ddict[data]).
If True, return entire node attribute dict as (n, ddict).
If False, return just the nodes n. The default is False.
default : value, bool, optional
Value used for nodes that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
list
A list of scaffold node keys corresponding to virtual scaffolds.
"""
virtual = []
for scaffold, d in scaffoldgraph.get_scaffold_nodes(True):
mol_count = 0
for succ in scaffoldgraph.successors(scaffold):
if scaffoldgraph.nodes[succ].get('type') == 'molecule':
mol_count += 1
if mol_count == 0:
if data is False:
virtual.append(scaffold)
elif data is True:
virtual.append((scaffold, d))
else:
virtual.append((scaffold, d.get(data, default)))
return virtual
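# Usage sketch (``sg`` is assumed to be an existing ScaffoldGraph; the keys and
# attribute names below are purely illustrative):
#   get_virtual_scaffolds(sg)                    # -> ['c1ccccc1', ...]
#   get_virtual_scaffolds(sg, data=True)         # -> [('c1ccccc1', {...}), ...]
#   get_virtual_scaffolds(sg, data='hierarchy')  # -> [('c1ccccc1', 1), ...]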
def get_singleton_scaffolds(scaffoldgraph, data=False, default=None):
"""Get singleton scaffolds within a scaffold graph.
Singleton scaffolds represent scaffolds that are direct members of only
one compound in the current collection.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query
data : str, bool, optional
The scaffold node attribute returned in 2-tuple (n, ddict[data]).
If True, return entire node attribute dict as (n, ddict).
If False, return just the nodes n. The default is False.
default : value, bool, optional
Value used for nodes that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
list
        A list of scaffold node keys corresponding to singleton scaffolds.
"""
singletons = []
for scaffold, d in scaffoldgraph.get_scaffold_nodes(True):
mol_count = 0
for succ in scaffoldgraph.successors(scaffold):
if scaffoldgraph.nodes[succ].get('type') == 'molecule':
mol_count += 1
if mol_count == 1:
if data is False:
singletons.append(scaffold)
elif data is True:
singletons.append((scaffold, d))
else:
singletons.append((scaffold, d.get(data, default)))
return singletons
|
py | b40a19c3e454aff747bb484a2148abb3f7a2c4a4 | # # WAP to accept 10 numbers in a tuple, find the sum of its elements, and display the odd elements.
numbers = tuple()
odd = tuple()
for _ in range(10):
num = int(input("Enter number: "))
numbers += (num,)
print(sum(numbers))
for i in numbers:
if(i % 2 != 0):
odd += (i,)
print(odd)
# # Write a program to read email IDs of n number of students and store them in a tuple. Create two new tuples, one to store only the usernames from the email IDs and second to store domain names from the email ids. Print all three tuples at the end of the program.
email = tuple()
usernames = tuple()
domain = tuple()
n = int(input("How many email IDs would you like to enter: "))
for i in range(n):
email += (input("Enter email ID: "),)
usernames += (email[i].split('@')[0],)
domain += (email[i].split('@')[1],)
print(email)
print(usernames)
print(domain)
# # Write a program to input names of n students and store them in a tuple. Also, input a name from the user and find if this student is present in the tuple or not.
names = tuple()
n = int(input("How many names would you like to enter? "))
for _ in range(n):
names += (input("Enter name: "),)
name_Check = input("Enter name to check within list: ")
if(name_Check in names):
print("Name found in tuple")
else:
print("Name not found in tuple")
################################ HOMEWORK ##########################
'''Write a program to take in the roll number, name and percentage of marks for n students of Class X. Write user defined functions to
• accept details of the n students(n is the number of students)
• search details of a particular student on the basis of roll number and display result
• display the result of all the students
• find the topper amongst them'''
name = list()
roll = list()
percent = list()
def accept_details():
    for _ in range(int(input("How many students' details would you like to enter? "))):
name.append(input("Enter Student name: "))
roll.append(int(input("Enter Roll No: ")))
percent.append(float(input("Enter percent: ")))
def search_details(roll_No):
index = 0
for i in range(len(roll)):
if roll[i] == roll_No:
index = i
print(
f"Name of Student having roll no {roll_No}: {name[index]} has the percentage {percent[index]}")
def display_results():
for i in range(len(name)):
print(f"Name: {name[i]} Roll No: {roll[i]} Percent: {percent[i]}")
def topper_find():
topper = percent[0]
index = 0
for i in range(1, len(percent)):
if(topper < percent[i]):
topper = percent[i]
index = i
print(
f"The topper is {name[index]} bearing Roll No: {roll[index]} having percentage: {percent[index]}")
print("First fill up the details of the student of your Class X ...")
accept_details()
userChoice = int(input(
    "1. Search a particular student's details\n2. Display all students' results\n3. Find the topper among them\n4. Exit\nEnter 1, 2, 3 or 4: "))
if(userChoice == 1):
rollNO = int(input("Enter Roll No of the Student: "))
search_details(rollNO)
elif(userChoice == 2):
display_results()
elif(userChoice == 3):
topper_find()
else:
exit()
|
py | b40a1a880ad9226adccdd31b0e87b87348f201fc | #!/usr/bin/env python3
import argparse
import os
import pysh_builtins
import pysh_read
import shlex
import subprocess
import sys
def load_rc() -> None:
pysh_read.interpet(os.environ["PYSHRC"])
def check_rc(file):
file = os.path.abspath(os.path.expanduser(file))
err_msg_start = f"error: rc file at '{file}'"
try:
open(file, 'r').close()
except FileNotFoundError:
raise argparse.ArgumentTypeError(f"{err_msg_start}: does not exist")
except PermissionError:
raise argparse.ArgumentTypeError(f"{err_msg_start}: insufficient permissions to open file")
except OSError as oe:
raise argparse.ArgumentTypeError(f"{err_msg_start}: cannot open file [Errno {oe.errno}]")
return file
def get_user_input() -> tuple[str, ...]:
prompt = os.getenv("PROMPT").format(**os.environ)
user_input = input(prompt)
user_input_split = shlex.split(user_input)
return tuple(user_input_split)
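# Example: with PROMPT="{USER}@{HOST} $ " exported in the environment (typically
# set via the rc file), the rendered prompt would look like "alice@laptop $ "
# (user and host values are hypothetical).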
def prompt() -> None:
while True:
try:
            user_input_split = get_user_input()
            # Ignore empty input lines instead of raising an IndexError below.
            if not user_input_split:
                continue
            user_input_cmd = user_input_split[0]
if user_input_cmd == "exit":
break
elif user_input_cmd in pysh_builtins.BUILTINS:
pysh_builtins.run_builtin(user_input_cmd, user_input_split[1:])
else:
subprocess.run(user_input_split)
except (EOFError, KeyboardInterrupt):
print()
except FileNotFoundError:
print(f"pysh: unknown command: {user_input_split[0]}", file=sys.stderr)
def setup_env_vars() -> None:
os.environ["HOST"] = os.uname()[1]
def fetch_args() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--config", "--rcfile", "-r", nargs=1, default=["~/.pyshrc"], help="specify a custom RC file to use for configuration", type=check_rc)
args = vars(parser.parse_args())
os.environ["PYSHRC"] = args["config"][0]
def main() -> None:
fetch_args()
setup_env_vars()
load_rc()
prompt()
if __name__ == "__main__":
main()
|
py | b40a1ab95b3d3ec41ebbe1daa115f37ec4f02592 | """
Hello World interface for the client library
"""
from client.client.core import (
do_hello,
do_bye
)
def hello(person):
return do_hello(person)
def bye(person):
return do_bye(person)
|
py | b40a1bd98d92621bacdfd2908ecf567ff7e1a6a0 | import pandas as pd
import numpy as np
import scipy.stats as stats
def analyze_returns(net_returns):
"""
Perform a t-test, with the null hypothesis being that the mean return is zero.
Parameters
----------
net_returns : Pandas Series
A Pandas Series for each date
Returns
-------
t_value
t-statistic from t-test
p_value
Corresponding p-value
"""
# TODO: Perform one-tailed t-test on net_returns
# Hint: You can use stats.ttest_1samp() to perform the test.
# However, this performs a two-tailed t-test.
    # You'll need to divide the p-value by 2 to get the one-tailed p-value.
null_hypothesis = 0.0
t, p = stats.ttest_1samp(net_returns, null_hypothesis)
return t, p/2
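# Illustrative sanity check (synthetic data, not part of the exercise):
#   fake_returns = pd.Series(np.random.normal(loc=1e-3, scale=1e-2, size=250))
#   t, p = analyze_returns(fake_returns)  # expect t > 0 and p < 0.5 on average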
def test_run(filename='net_returns.csv'):
"""Test run analyze_returns() with net strategy returns from a file."""
    # Read the returns as a Series from the given CSV file (first column as the index).
    net_returns = pd.read_csv(filename, header=0, index_col=0).iloc[:, 0]
t, p = analyze_returns(net_returns)
print("t-statistic: {:.3f}\np-value: {:.6f}".format(t, p))
if __name__ == '__main__':
test_run() |
py | b40a1c7538c272100f9b034e4c66f6e3f7864e74 | """
WSGI config for liberaction project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'liberaction.settings')
application = get_wsgi_application()
|
py | b40a1c8a032f11536abd044a94da0603ca1d7e4e | import click
import logging
import pandas
from typing import Union, List, Dict
import networkx as nx
from prefixcommons.curie_util import expand_uri
from kgx.utils.kgx_utils import get_toolkit
def map_graph(graph: nx.MultiDiGraph, mapping: Dict, preserve: bool = True) -> nx.MultiDiGraph:
"""
Remap node identifiers in a networkx.MultiDiGraph based on a provided mapping.
For nodes, the old identifier is saved as `source_curie` attribute.
In case of edges,
- if the node is the `subject` then the old identifier is saved as `source_subject`
- if the node is the `object` then the old identifier is saved as `source_object`
Parameters
----------
graph: networkx.MultiDiGraph
A graph
mapping: dict
Dictionary containing node identifier mappings
preserve: bool
Preserve the old identifier before remapping.
Returns
-------
networkx.MultiDiGraph
The graph with its nodes remapped
"""
if preserve:
for nid in graph.nodes():
if nid in mapping:
# add_node will append attributes
graph.add_node(nid, source_curie=nid)
for oid, sid in graph.edges():
if oid in mapping:
for ex in graph[oid][sid]:
graph[oid][sid][ex].update(source_object=oid)
if sid in mapping:
for ex in graph[oid][sid]:
                    graph[oid][sid][ex].update(source_subject=sid)
nx.relabel_nodes(graph, mapping, copy=False)
return graph
def graceful_update(a: Dict, b: Dict) -> Dict:
"""
Update keys in dictionary `a` with new values from dictionary `b`.
This method will not change the `type` of a value that already exists
in dictionary `a`. If a value in dictionary `a` is a list, then the
new values from dictionary `b` will be appended to the existing list.
Parameters
----------
a: dict
Dictionary to update
b: dict
Dictionary with keys to update along with new values
Returns
-------
dict
The updated dictionary
"""
for key, value in b.items():
if key in a:
if isinstance(a[key], list) and isinstance(value, list):
for x in value:
if x not in a[key]:
a[key].append(x)
elif isinstance(a[key], list) and not isinstance(value, list):
if value not in a[key]:
a[key].append(value)
elif a[key] is None:
a[key] = value
else:
pass
else:
a[key] = value
return a
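# Minimal illustration (hypothetical attribute values):
#   graceful_update({'name': 'x', 'xref': ['A']}, {'name': 'y', 'xref': ['B']})
#   -> {'name': 'x', 'xref': ['A', 'B']}
# The existing scalar 'name' keeps its value and type; the list 'xref' is extended.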
def relabel_nodes(graph: nx.MultiDiGraph, mapping: Dict) -> nx.MultiDiGraph:
"""
Performs the relabelling of nodes, and ensures that node attributes are
copied over appropriately.
Example:
graph = nx.Graph()
graph.add_edge('a', 'b')
graph.add_edge('c', 'd')
graph.node['a']['synonym'] = ['A']
graph.node['b']['synonym'] = ['B']
graph.node['c']['synonym'] = ['C']
graph.node['d']['synonym'] = ['D']
graph = relabel_nodes(graph, {'c' : 'b'})
for n in graph.nodes():
print(n, graph.node[n])
Output:
a {'synonym': ['A']}
b {'synonym': ['B', 'C']}
d {'synonym': ['D']}
"""
logging.info("Relabeling {} nodes".format(len(mapping)))
g = nx.relabel_nodes(graph, mapping, copy=True)
logging.info("Merging node attributes")
with click.progressbar(graph.nodes(), label='Progress') as bar:
for n in bar:
if n in mapping:
graceful_update(g.node[mapping[n]], graph.node[n])
elif n in g:
graceful_update(g.node[n], graph.node[n])
else:
pass
return g
def listify(o: object) -> Union[list, set, tuple]:
"""
Enclose a given object in a list.
If the object itself is a list, set or tuple then it returns the
object unchanged.
Parameters
----------
o: object
Any valid object
Returns
-------
Union[list, set, tuple]
A list or set or tuple
"""
if isinstance(o, (list, set, tuple)):
return o
else:
return [o]
def get_prefix(curie: str, default: str = None) -> str:
"""
Get prefix for a given CURIE.
Returns `default` if no prefix is found.
Parameters
----------
curie: str
A CURIE
default: str
Default value to return, if no prefix found
Returns
-------
str
The prefix of a given CURIE
"""
prefix = None
if ':' in curie:
prefix, _ = curie.rsplit(':', 1)
else:
prefix = default
return prefix
def build_sort_key(list_of_prefixes: List[List[str]]):
"""
For a list of lists of prefixes, gets the lowest
index of a matching prefix.
Parameters
----------
list_of_prefixes: list
A list of lists of prefixes
"""
def key(n):
k = len(list_of_prefixes) + 1
p = get_prefix(n, default='').upper()
for prefixes in list_of_prefixes:
for i, prefix in enumerate(prefixes):
if p == prefix.upper():
if i < k:
k = i
return k
return key
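# Example (hypothetical prefix priorities):
#   key = build_sort_key([['HGNC', 'ENSEMBL']])
#   sorted(['ENSEMBL:1', 'HGNC:5'], key=key)  # places 'HGNC:5' first, because
# its prefix appears earlier in the prefix list (index 0 vs index 1).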
class ReportBuilder(object):
def __init__(self, graph):
self.graph = graph
self.records = []
def add(self, node, xref):
provided_by = self.graph.node[node].get('provided_by')
if provided_by is not None:
provided_by = '; '.join(provided_by)
self.records.append({
'node' : node,
'xref' : xref,
'provided_by' : provided_by,
})
def to_csv(self, path, **kwargs):
df = pandas.DataFrame(self.records)
df = df[['node', 'xref', 'provided_by']]
if 'index' not in kwargs:
kwargs['index'] = False
df.to_csv(path, **kwargs)
def update(d:dict, key, value):
if key is None or value is None:
return
    if isinstance(value, list):
        for v in value:
            update(d, key, v)
        return
    if key in d:
        if isinstance(d[key], list):
            if value not in d[key]:
                d[key].append(value)
elif d[key] != value:
d[key] = [d[key], value]
else:
d[key] = value
def build_clique_graph(graph:nx.Graph) -> nx.Graph:
"""
Builds a graph induced by `same_as` relationships.
"""
cliqueGraph = nx.Graph()
with click.progressbar(graph.nodes(), label='building cliques') as bar:
for n in bar:
attr_dict = graph.node[n]
if 'same_as' in attr_dict:
for m in attr_dict['same_as']:
cliqueGraph.add_edge(n, m, provided_by=attr_dict['provided_by'])
for key, value in graph.node[n].items():
update(cliqueGraph.node[n], key, value)
update(cliqueGraph.node[n], 'is_node', True)
return cliqueGraph
def clique_merge(graph:nx.Graph, report=False) -> nx.Graph:
"""
Builds up cliques using the `same_as` attribute of each node. Uses those
cliques to build up a mapping for relabelling nodes. Chooses labels so as
to preserve the original nodes, rather than taking xrefs that don't appear
as nodes in the graph.
This method will also expand the `same_as` attribute of the nodes to
include the discovered clique.
"""
original_size = len(graph)
print('original graph has {} nodes'.format(original_size))
cliqueGraph = nx.Graph()
with click.progressbar(graph.nodes(data=True), label='building cliques from same_as node property') as bar:
for n, attr_dict in bar:
if 'same_as' in attr_dict:
for m in attr_dict['same_as']:
cliqueGraph.add_edge(n, m)
with click.progressbar(graph.edges(data=True), label='building cliques from same_as edges') as bar:
for u, v, attr_dict in bar:
if 'edge_label' in attr_dict and attr_dict['edge_label'] == 'same_as':
cliqueGraph.add_edge(u, v)
edges = []
with click.progressbar(cliqueGraph.edges(), label='Breaking invalid cliques') as bar:
for u, v in bar:
try:
u_categories = graph.node[u].get('category', [])
v_categories = graph.node[v].get('category', [])
except:
continue
l = len(edges)
for a in u_categories:
if len(edges) > l:
break
if get_toolkit().get_element(a) is None:
continue
for b in v_categories:
if get_toolkit().get_element(b) is None:
continue
a_ancestors = get_toolkit().ancestors(a)
b_ancestors = get_toolkit().ancestors(b)
if a_ancestors == b_ancestors == []:
continue
elif a not in b_ancestors and b not in a_ancestors:
edges.append((u, v))
break
print('breaking {} many edges'.format(len(edges)))
cliqueGraph.remove_edges_from(edges)
mapping = {}
connected_components = list(nx.connected_components(cliqueGraph))
print('Discovered {} cliques'.format(len(connected_components)))
with click.progressbar(connected_components, label='building mapping') as bar:
for nodes in bar:
nodes = list(nodes)
categories = set()
for n in nodes:
if not graph.has_node(n):
continue
attr_dict = graph.node[n]
attr_dict['same_as'] = nodes
if 'category' in attr_dict:
categories.update(listify(attr_dict['category']))
if 'categories' in attr_dict:
categories.update(listify(attr_dict['categories']))
list_of_prefixes = []
for category in categories:
try:
list_of_prefixes.append(get_toolkit().get_element(category).id_prefixes)
except:
pass
nodes.sort()
nodes.sort(key=build_sort_key(list_of_prefixes))
for n in nodes:
if n != nodes[0]:
mapping[n] = nodes[0]
g = relabel_nodes(graph, mapping)
edges = []
for u, v, key, data in g.edges(keys=True, data=True):
if data.get('edge_label') == 'same_as':
edges.append((u, v, key))
g.remove_edges_from(edges)
for n, data in g.nodes(data=True):
data['iri'] = expand_uri(n)
if 'id' in data and data['id'] != n:
data['id'] = n
if 'same_as' in data and n in data['same_as']:
data['same_as'].remove(n)
if data['same_as'] == []:
del data['same_as']
final_size = len(g)
print('Resulting graph has {} nodes'.format(final_size))
print('Eliminated {} nodes'.format(original_size - final_size))
return g
|
py | b40a1cca27f379533f12d2eb5b0972e1422d5336 | #! Copyright (C) 2017 Lukas Löhle
#!
#! This software may be modified and distributed under the terms
#! of the MIT license. See the LICENSE file for details.
import re
import unicodedata
from flask import render_template, flash, redirect, url_for, request
from flask.blueprints import Blueprint
from forms import CellForm, TaskForm, TemplateForm, OrderOptionForm, ConditionForm, NotebookOptionsForm
import os
import errno
import json
import copy
from pygments.lexers.python import PythonLexer
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from database import db
from models import Cell, CellType, Task, TaskCell, NotebookTemplate, OrderOption, Condition, StringPair, NotebookFile
nb_gen = Blueprint('nbg', __name__, template_folder='templates', static_folder='static')
@nb_gen.route('/')
@nb_gen.route('/index')
@nb_gen.route('/home')
def index():
return render_template('home.html')
@nb_gen.route('/tutorial')
def show_tutorial():
return render_template('tutorial.html')
@nb_gen.route('/notebooks/new', methods=['GET', 'POST'])
def create_notebook():
form = TemplateForm()
task_list = Task.query.order_by(Task.short).all()
form.tasks.choices = [(task.id, "[{}] {}".format(task.short, task.name)) for task in task_list]
for subform in form.order_options:
subform.tasks_random.choices = [(task.id, task.name) for task in task_list]
subform.tasks_fixed.choices = [(task.id, task.name) for task in task_list]
if form.validate_on_submit():
if NotebookTemplate.query.filter_by(name=form.name.data).first():
flash('A notebook template with this name already exists', 'danger')
else:
notebook = NotebookTemplate(form.name.data)
for task_id in form.tasks.data:
notebook.tasks.append(Task.query.get(task_id))
db.session.add(notebook)
db.session.commit()
for idx, option in enumerate(form.order_options.entries):
order_option = OrderOption(idx, option.order_type.data == 'random')
order_option.notebook = notebook.id
if order_option.random:
for task_id in option.tasks_random.data:
order_option.tasks.append(Task.query.get(task_id))
else:
order_option.tasks.append(Task.query.get(option.tasks_fixed.data))
db.session.add(order_option)
db.session.commit()
flash('Template created', 'success')
return redirect(url_for('nbg.list_notebooks'))
if not form.tasks.data:
form.tasks.data = [task_list[0].id]
selected_tasks = [task_id for task_id in form.tasks.data]
return render_template('notebook.html', form=form, selected_tasks=selected_tasks,
action=url_for('nbg.create_notebook'))
@nb_gen.route('/tasks/new', methods=['GET', 'POST'])
def create_task():
form = TaskForm()
cell_list = Cell.query.order_by(Cell.name).all()
for field in form.cells.entries:
field.choices = [(cell.id, "{} ({})".format(cell.name, cell.cell_type)) for cell in cell_list]
if form.validate_on_submit():
if Task.query.filter_by(name=form.name.data).first():
flash('A task with this name already exists.', 'danger')
else:
cell_ids = []
for field in form.cells.entries:
if field.data in cell_ids:
flash('You cannot add a cell to a task twice.', 'danger')
return render_template('task.html', form=form, action=url_for('nbg.create_task'))
cell_ids.append(field.data)
task = Task(form.name.data, form.short.data)
task.description = form.description.data
for idx, field in enumerate(form.cells.entries):
task_cell = TaskCell(position=idx)
task_cell.cell = Cell.query.filter_by(id=field.data).first()
task.cells.append(task_cell)
db.session.add(task)
db.session.commit()
flash('Task created', 'success')
return redirect(url_for('nbg.list_tasks'))
return render_template('task.html', form=form, action=url_for('nbg.create_task'))
@nb_gen.route('/notebooks', methods=['GET', 'POST'])
def list_notebooks():
if request.method == 'POST':
if request.form['delete']:
notebook = NotebookTemplate.query.get_or_404(request.form['delete'])
name = notebook.name
db.session.delete(notebook)
db.session.commit()
flash('Deleted "{}"'.format(name), 'info')
tasks_exist = Task.query.first() is not None
notebook_list = NotebookTemplate.query.order_by(NotebookTemplate.name).all()
return render_template('notebooks.html', notebooks=notebook_list, tasks_exist=tasks_exist)
@nb_gen.route('/tasks', methods=['GET', 'POST'])
def list_tasks():
if request.method == 'POST' and request.form['delete']:
task = Task.query.get_or_404(request.form['delete'])
task_name = task.name
db.session.delete(task)
db.session.commit()
flash('Deleted "{}"'.format(task_name), 'info')
cells_exist = Cell.query.first() is not None
task_list = Task.query.order_by(Task.short).all()
return render_template('tasks.html', task_list=task_list, cells_exist=cells_exist)
@nb_gen.route('/cells', methods=['GET', 'POST'])
def list_cells():
if request.method == 'POST' and request.form['delete']:
cell = Cell.query.get_or_404(request.form['delete'])
cell_name = cell.name
db.session.delete(cell)
db.session.commit()
flash('Deleted "{}"'.format(cell_name), 'info')
cell_list = Cell.query.order_by(Cell.name).all()
return render_template('cells.html', cell_list=cell_list)
@nb_gen.route('/conditions', methods=['GET', 'POST'])
def list_conditions():
if request.method == 'POST' and request.form['delete']:
condition = Condition.query.get_or_404(request.form['delete'])
condition_name = condition.name
db.session.delete(condition)
db.session.commit()
flash('Deleted "{}"'.format(condition_name), 'info')
condition_list = Condition.query.order_by(Condition.name).all()
return render_template('conditions.html', condition_list=condition_list)
@nb_gen.route('/tasks/<int:task_id>')
def view_task(task_id):
task = Task.query.filter_by(id=task_id).first_or_404()
code_cells = {}
css = None
for task_cell in task.cells:
if task_cell.cell.cell_type == 'code':
code, css = highlight_code(task_cell.cell.source)
code_cells[task_cell.cell.id] = code
return render_template('task_view.html', task=task, css=css, code_cells=code_cells)
@nb_gen.route('/cells/<int:cell_id>')
def view_cell(cell_id):
cell = Cell.query.filter_by(id=cell_id).first_or_404()
code = None
css = None
if cell.cell_type == 'code':
code, css = highlight_code(cell.source)
return render_template('cell_view.html', cell=cell, code=code, css=css)
@nb_gen.route('/notebooks/<int:nb_id>/generate', methods=['GET', 'POST'])
def generate_notebooks(nb_id):
nb = NotebookTemplate.query.get_or_404(nb_id)
form = NotebookOptionsForm()
conditions = Condition.query.all()
form.conditions.choices = [(condition.id, condition.name) for condition in conditions]
if form.validate_on_submit():
include_fixed = form.include_fixed.data
file_prefix = form.file_prefix.data
if form.conditions.data:
selected_conditions = Condition.query.filter(Condition.id.in_(form.conditions.data)).all()
else:
selected_conditions = None
nb_names = generate_notebook_names(nb, fileprefix=file_prefix,
include_fixed=include_fixed, conditions=selected_conditions)
if request.form.get('generate', False):
number = generate_notebook_files(nb, fileprefix=file_prefix,
include_fixed=include_fixed, conditions=selected_conditions)
flash('{} notebooks have been generated'.format(number), 'success')
else:
if form.errors:
print(form.errors)
form.file_prefix.data = slugify(nb.name)
nb_names = generate_notebook_names(nb)
return render_template('nb_generator.html', nb_names=nb_names, nb=nb, form=form)
@nb_gen.route('/notebooks/<int:nb_id>/edit', methods=['GET', 'POST'])
def edit_notebook(nb_id):
nb = NotebookTemplate.query.get_or_404(nb_id)
form = TemplateForm(obj=nb)
task_list = Task.query.order_by(Task.short).all()
form.tasks.choices = [(task.id, "[{}] {}".format(task.short, task.name)) for task in task_list]
selected_tasks = [task.id for task in nb.tasks]
if request.method == 'GET':
form.tasks.data = [task.id for task in nb.tasks]
for _ in form.order_options:
form.order_options.pop_entry()
for order_option in nb.options:
option_field = OrderOptionForm()
option_field.order_type = 'random' if order_option.random else 'fixed'
form.order_options.append_entry(option_field)
for (idx, subform) in enumerate(form.order_options):
subform.tasks_random.choices = [(task.id, task.name) for task in task_list]
subform.tasks_fixed.choices = [(task.id, task.name) for task in task_list]
if request.method == 'GET':
subform.tasks_random.data = [task.id for task in nb.options[idx].tasks] if nb.options[idx].random else None
subform.tasks_fixed.data = nb.options[idx].tasks[0].id if not nb.options[idx].random else None
if form.validate_on_submit():
if form.name.data != nb.name and NotebookTemplate.query.filter_by(name=form.name.data).first():
flash('A notebook with this name already exists.', 'danger')
else:
nb.name = form.name.data
selected_tasks = form.tasks.data
nb.tasks = Task.query.filter(Task.id.in_(selected_tasks)).all()
to_remove = len(nb.options) - len(form.order_options.entries)
if to_remove > 0:
nb.options = nb.options[:-to_remove]
for (idx, subform) in enumerate(form.order_options.entries):
is_random = subform.order_type.data == 'random'
option_tasks = subform.tasks_random.data if is_random else [subform.tasks_fixed.data]
if idx >= len(nb.options):
# new option
option = OrderOption(idx, is_random)
nb.options.append(option)
else:
option = nb.options[idx]
option.random = is_random
option.tasks = Task.query.filter(Task.id.in_(option_tasks)).all()
db.session.commit()
flash('Saved changes', 'success')
return render_template('notebook.html', form=form, notebook=nb, selected_tasks=selected_tasks,
action=url_for('nbg.edit_notebook', nb_id=nb_id))
@nb_gen.route('/tasks/<int:task_id>/edit', methods=['GET', 'POST'])
def edit_task(task_id):
task = Task.query.get_or_404(task_id)
form = TaskForm(obj=task)
cell_list = Cell.query.order_by(Cell.name).all()
for idx, field in enumerate(form.cells.entries):
field.choices = [(cell.id, "{} ({})".format(cell.name, cell.cell_type)) for cell in cell_list]
if request.method == 'GET':
field.data = task.cells[idx].cell.id if len(task.cells) > idx else None
if form.validate_on_submit():
if form.name.data != task.name and Task.query.filter_by(name=form.name.data).first():
flash('A task with this name already exists.', 'danger')
else:
task.name = form.name.data
task.description = form.description.data
task.short = form.short.data
for task_cell in task.cells:
db.session.delete(task_cell)
for idx, field in enumerate(form.cells.entries):
task_cell = TaskCell(position=idx)
task_cell.cell = Cell.query.filter_by(id=field.data).first()
task.cells.append(task_cell)
db.session.commit()
flash('Saved changes', 'success')
return render_template('task.html', form=form, task=task, action=url_for('nbg.edit_task', task_id=task_id))
@nb_gen.route('/cells/<int:cell_id>/edit', methods=['GET', 'POST'])
def edit_cell(cell_id):
cell = Cell.query.get_or_404(cell_id)
form = CellForm(obj=cell)
if request.method == 'POST':
if form.validate():
if form.name.data != cell.name and Cell.query.filter_by(name=form.name.data).first():
flash('Cell with this name already exists', 'danger')
else:
cell.name = form.name.data
cell.collapsed = form.collapsed.data
cell.cell_type = form.cell_type.data
cell.source = form.source.data
cell.set_metadata(form.cell_metadata.data)
db.session.commit()
flash('Saved changes', 'success')
return render_template('cell.html', form=form, cell=cell, action=url_for('nbg.edit_cell', cell_id=cell_id))
else:
for i in range(len(form.cell_metadata.entries)):
form.cell_metadata.pop_entry()
celldata = cell.get_metadata()
for entry in celldata:
form.cell_metadata.append_entry(entry)
if not celldata:
form.cell_metadata.append_entry({})
return render_template('cell.html', form=form, cell=cell, action=url_for('nbg.edit_cell', cell_id=cell_id))
@nb_gen.route('/conditions/<int:condition_id>/edit', methods=['GET', 'POST'])
def edit_condition(condition_id):
condition = Condition.query.get_or_404(condition_id)
form = ConditionForm(obj=condition)
if form.validate_on_submit():
if form.name.data != condition.name and Condition.query.filter_by(name=form.name.data).first():
flash('Condition with this name already exists', 'danger')
else:
condition.name = form.name.data
to_remove = len(condition.pairs) - len(form.pairs.entries)
if to_remove > 0:
condition.pairs = condition.pairs[:-to_remove]
for idx, pair in enumerate(form.pairs.entries):
if idx >= len(condition.pairs):
new_pair = StringPair(pair.key.data, pair.value.data)
condition.pairs.append(new_pair)
else:
condition.pairs[idx].key = pair.key.data
condition.pairs[idx].value = pair.value.data
db.session.commit()
flash('saved changes', 'success')
return render_template('condition.html', form=form, action=url_for('nbg.edit_condition', condition_id=condition_id))
@nb_gen.route('/cells/new', methods=['GET', 'POST'])
def create_cell():
form = CellForm()
if form.validate_on_submit():
if Cell.query.filter_by(name=form.name.data).first():
flash('A cell with this name already exists.', 'danger')
return render_template('cell.html', action=url_for('nbg.create_cell'), form=form)
cell = Cell(form.name.data, CellType(form.cell_type.data), form.source.data)
cell.collapsed = form.collapsed.data
cell.set_metadata(form.cell_metadata.data)
db.session.add(cell)
db.session.commit()
flash('Cell created', 'success')
return redirect(url_for('nbg.list_cells'))
return render_template('cell.html', action=url_for('nbg.create_cell'), form=form)
@nb_gen.route('/conditions/new', methods=['GET', 'POST'])
def create_condition():
form = ConditionForm()
if form.validate_on_submit():
if Condition.query.filter_by(name=form.name.data).first():
flash('A condition with this name already exists.', 'danger')
return render_template('condition.html', action=url_for('nbg.create_condition'), form=form)
condition = Condition(form.name.data)
for field in form.pairs:
condition.pairs.append(StringPair(field.key.data, field.value.data))
db.session.add(condition)
db.session.commit()
flash('Condition created', 'success')
return redirect(url_for('nbg.list_conditions'))
return render_template('condition.html', action=url_for('nbg.create_condition'), form=form)
def highlight_code(code):
# TODO: refactor this into a code cell model to support multiple languages
"""
Creates html and css for python code highlighting.
:param code: The python code to highlight
:return: A dictionary with html code and css styling
"""
lexer = PythonLexer()
formatter = HtmlFormatter()
code_html = highlight(code, lexer, formatter)
code_css = formatter.get_style_defs()
return code_html, code_css
def generate_notebook_names(notebook, fileprefix=None, include_fixed=True, conditions=None):
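    """Return the list of .ipynb file names that would be generated for this
    template: one name per task ordering, and per condition if conditions are
    supplied."""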
if not fileprefix:
fileprefix = notebook.name
fileprefix = slugify(fileprefix)
if conditions:
names = []
for condition in conditions:
names += [fileprefix + '_' + slugify(condition.name)]
else:
names = [fileprefix]
first_task = True
for option in notebook.options:
if not option.random and include_fixed:
task_short = slugify(option.tasks[0].short)
if first_task:
task_short = '_[' + task_short
first_task = False
else:
task_short = '_' + task_short
names = [name + task_short for name in names]
elif option.random:
new_names = []
orders = latin_squares[min(5, len(option.tasks))]
for name in names:
for order in orders:
if first_task:
new_name = name + '_' + '['
else:
new_name = name + '_'
for idx in order:
new_name += slugify(option.tasks[idx].short)
new_names.append(new_name)
first_task = False
names = new_names
names = [name + '].ipynb' if '[' in name else name + '.ipynb' for name in names]
return names
def generate_notebook_files(notebook, fileprefix=None, include_fixed=True, conditions=None):
if not fileprefix:
fileprefix = notebook.name
fileprefix = slugify(fileprefix)
nb_base = {'metadata': notebook_metadata,
'nbformat': notebook_nbformat,
'nbformat_minor': notebook_nbformat_minor,
'cells': []}
if conditions:
files = {}
task_replace_ops = {}
for condition in conditions:
file_name = fileprefix + '_' + slugify(condition.name)
files[file_name] = copy.deepcopy(nb_base)
task_replace_ops[file_name] = get_condition_replace_ops(condition)
else:
files = {fileprefix: nb_base}
task_replace_ops = {fileprefix: []}
task_index = 0
first_task = True
for option in notebook.options:
if not option.random:
if include_fixed:
task_short = slugify(option.tasks[0].short)
for filename in list(files.keys()):
if first_task:
new_filename = filename + '_[' + task_short
else:
new_filename = filename + '_' + task_short
files[new_filename] = files.pop(filename)
task_replace_ops[new_filename] = task_replace_ops.pop(filename)
first_task = False
fixed_replace_ops = get_task_replace_ops(task_index, option.tasks[0])
for operations in task_replace_ops.values():
operations.extend(fixed_replace_ops)
task_index += 1
cells = option.tasks[0].get_cell_list()
for nb in list(files.values()):
nb['cells'].extend(cells)
elif option.random:
tasks_short = [slugify(task.short) for task in option.tasks]
tasks_cells = [task.get_cell_list() for task in option.tasks]
task_orders = latin_squares[min(5, len(option.tasks))]
files_new = {}
replace_ops_new = {}
for task_order in task_orders:
order = ''
for idx in task_order:
order += tasks_short[idx]
for filename, content in files.items():
if first_task:
new_filename = filename + '_[' + order
else:
new_filename = filename + '_' + order
files_new[new_filename] = copy.deepcopy(content)
reordered_cells = []
for idx in task_order:
reordered_cells.extend(tasks_cells[idx])
files_new[new_filename]['cells'].extend(reordered_cells)
replace_ops_new[new_filename] = copy.deepcopy(task_replace_ops[filename])
for (idx, task_idx) in enumerate(task_order):
replace_ops_new[new_filename].extend(
get_task_replace_ops(task_index + idx, option.tasks[task_idx]))
first_task = False
files = files_new
task_replace_ops = replace_ops_new
task_index += len(option.tasks)
for filename in list(files.keys()):
apply_replace_ops(files[filename], task_replace_ops[filename])
new_filename = filename + ']' + '.ipynb' if '[' in filename else filename + '.ipynb'
files[new_filename] = files.pop(filename)
save_notebook_files(notebook, files)
write_db_schema()
return len(files.keys())
def get_task_replace_ops(task_index, task):
result = []
placeholder_task_name = '%task' + str(task_index) + '%'
placeholder_task_short = '%task' + str(task_index) + '.short%'
placeholder_task_desc = '%task' + str(task_index) + '.description%'
result.append((placeholder_task_name, task.name if task.name else ''))
result.append((placeholder_task_short, task.short if task.short else ''))
result.append((placeholder_task_desc, task.description if task.description else ''))
return result
def get_condition_replace_ops(condition):
result = []
for pair in condition.pairs:
replace_op = ('%' + pair.key + '%', pair.value)
result.append(replace_op)
return result
def apply_replace_ops(nb_element, replace_ops):
"""
:param nb_element: A notebook dict or one of its elements
:param replace_ops: A list of tuples in the form (placeholder, value). Those are applied to any string in
the provided element.
"""
if isinstance(nb_element, list):
for idx, element in enumerate(nb_element):
if isinstance(element, str):
nb_element[idx] = apply_ops_to_string(element, replace_ops)
else:
apply_replace_ops(element, replace_ops)
if isinstance(nb_element, dict):
for key in list(nb_element.keys()):
if isinstance(nb_element[key], str):
nb_element[key] = apply_ops_to_string(nb_element[key], replace_ops)
else:
apply_replace_ops(nb_element[key], replace_ops)
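# Illustration (hypothetical placeholders): with
#   replace_ops = [('%task0%', 'Sorting'), ('%task0.short%', 'sort')]
# every string nested anywhere in the notebook dict gets those placeholders
# substituted in place.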
def apply_ops_to_string(string, replace_ops):
for (placeholder, value) in replace_ops:
string = string.replace(placeholder, value)
return string
def save_notebook_files(notebook, files):
# create directory if it does not exist
try:
os.makedirs('generated')
except OSError as e:
if e.errno != errno.EEXIST:
raise
nb_files = NotebookFile.query.filter_by(category=notebook.id)
for nb_file in nb_files:
file_path = os.path.join('generated', nb_file.filename)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print('Error deleting file {}: {}'.format(file_path, e))
db.session.delete(nb_file)
for filename, notebook_content in files.items():
with open(os.path.join('generated', filename), 'w', encoding='utf-8') as outfile:
json.dump(notebook_content, outfile, indent=4, separators=(',', ': '))
nb_file = NotebookFile(notebook.id, filename)
db.session.add(nb_file)
db.session.commit()
def write_db_schema(filename='dbSchema.sql'):
try:
os.makedirs('generated')
except OSError as e:
if e.errno != errno.EEXIST:
raise
files = NotebookFile.query.all()
with open(os.path.join('generated', filename), 'w', encoding='utf-8') as outfile:
#outfile.write(NotebookFile.get_table_string() + '\n')
i = 0
for file in files:
file_path = os.path.join('generated', file.filename)
try:
if os.path.isfile(file_path):
outfile.write(file.get_insert_string(i) + '\n')
i += 1
except Exception as e:
print('Error trying to check on file {}: {}'.format(file_path, e))
def slugify(value):
value = str(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
value = value.replace(' ', '_')
return value
notebook_metadata = {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2.0
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.9"
}
}
notebook_nbformat = 4
notebook_nbformat_minor = 0
latin_squares = {
1: [[0]],
2: [[0, 1], [1, 0]],
3: [[0, 1, 2], [1, 2, 0], [2, 0, 1]],
4: [[0, 1, 2, 3], [1, 0, 3, 2], [2, 3, 0, 1], [3, 2, 1, 0]],
5: [[0, 1, 2, 3, 4], [1, 2, 4, 0, 3], [2, 4, 3, 1, 0], [3, 0, 1, 4, 2], [4, 3, 0, 2, 1]]}
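# Hedged illustration (not part of the original module's public API): how the placeholder
# machinery above ties together. get_task_replace_ops builds (placeholder, value) pairs for
# one task index and apply_ops_to_string substitutes them into any cell text. The Task
# namedtuple below is a stand-in assumption for the real task model used elsewhere.
def _placeholder_replacement_example():
    from collections import namedtuple
    Task = namedtuple('Task', ['name', 'short', 'description'])
    task = Task(name='Sorting', short='sort', description='Sort the list in place')
    ops = get_task_replace_ops(0, task)
    template = 'Task %task0% (%task0.short%): %task0.description%'
    # returns 'Task Sorting (sort): Sort the list in place'
    return apply_ops_to_string(template, ops)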
|
py | b40a1d208f9a2e1de7cb59a3dad8c2c224a1f310 | from argparse import ArgumentParser
from configparser import ConfigParser
import json
argparser = ArgumentParser(description='Backup camera images in the cloud')
argparser.add_argument("-c", "--config", required=True,
help="Configuration file")
argparser.add_argument("-s", "--source", required=True,
help="Source directory (usually the root of the SDCard)")
argparser.add_argument("--verify", required=False, action="store_true",
help="NOT IMPLEMENTED! Verify if each file has a corresponding object")
argparser.add_argument("--initdb", required=False, action="store_true",
help="NOT IMPLEMENTED! Create database table and index if non existent")
argparser.add_argument("--verbose", help="Write some informations about the process",
required=False, action="store_true")
argparser.add_argument("--backupdb", required=False, action="store_true",
help="Backup the SQLite database in the object storage after operations")
argparser.add_argument("--localcopy", required=False, action="store_true",
help="Make a local copy of each picture uploaded")
args = argparser.parse_args()
def vprint(*arguments, **kwarguments):
if args.verbose:
print(*arguments, **kwarguments)
config = ConfigParser(delimiters='=')
if config.read(args.config):
vprint("Configuration file: " + args.config)
else:
quit("ERROR: No configuration file at path [" + args.config + "]")
filetypes_json = 'filetypes.json'
if config['DEFAULT']['filetypes']:
filetypes_json = config['DEFAULT']['filetypes']
with open(filetypes_json) as filetypes:
    fileinfo = json.load(filetypes)
# Following "hack" is… horrible :(
# Putting the file extensions in two tuples,
# one with the dot and one without the dot
exts = []
dot_exts = []
for fileext in fileinfo:
exts.append(str(fileext).upper())
dot_exts.append('.'+str(fileext).upper())
exts = tuple(exts)
dot_exts = tuple(dot_exts)
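# Hedged sketch (not in the original script): one plausible way the extension tuples built
# above would be used, i.e. walking the source directory and keeping only files whose
# upper-cased name ends with a configured extension. Nothing here runs automatically; the
# actual backup logic is not shown in this excerpt.
def iter_backup_candidates(source_dir):
    import os
    for root, _dirs, names in os.walk(source_dir):
        for name in names:
            if name.upper().endswith(dot_exts):  # str.endswith accepts a tuple of suffixes
                yield os.path.join(root, name)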
|
py | b40a1df0eb5ec5dc6c89fa655ab93b3868d98160 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcls import __version__
from mmcls.apis import init_random_seed, set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='Train a model')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--device', help='device used for training. (Deprecated)')
group_gpus.add_argument(
'--gpus',
type=int,
help='(Deprecated, please use --gpu-id) number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='(Deprecated, please use --gpu-id) ids of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-id',
type=int,
default=0,
help='id of gpu to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
model = build_classifier(cfg.model)
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
# save mmcls version, config file content and class names in
# runner as meta data
meta.update(
dict(
mmcls_version=__version__,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES))
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
device='cpu' if args.device == 'cpu' else 'cuda',
meta=meta)
if __name__ == '__main__':
main()
|
py | b40a1e047b16765aefb7a98f05508c511eabf7a2 | class Sources:
def __init__(self, name, description, url):
        self.name = name
self.description = description
self.url = url
class Articles:
"""Define article model"""
def __init__(self, source, author, title, description, url, urlToImage, publishedAt):
self.source = source
self.author = author
self.title = title
self.description = description
self.url = url
self.urlToImage = urlToImage
self.publishedAt = publishedAt
|
py | b40a1fd260aab3880730286c688a6329f3083ed6 | from output.models.ms_data.regex.regex_test_72_xsd.regex_test_72 import Doc
__all__ = [
"Doc",
]
|
py | b40a20404f445636ce17790e01c2c18af6ad6386 | from django.urls import path
from accounts import views
urlpatterns = [
# Listings home/index page
path('login', views.login, name='login'),
path('register', views.register, name='register'),
path('logout', views.logout, name='logout'),
path('dashboard', views.dashboard, name='dashboard')
]
|
py | b40a224a859271210987777174d9ae07fbeb8a98 | """Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
"""Compute the RBF kernel matrix with sigma2 as the median pairwise
distance.
"""
sigma2 = np.median(pairwise_distances(X, metric='euclidean'))**2
K = pairwise_kernels(X, X, metric='rbf', gamma=1.0/sigma2, n_jobs=-1)
return K
def balanced_accuracy_scoring(clf, X, y):
"""Scoring function that computes the balanced accuracy to be used
internally in the cross-validation procedure.
"""
y_pred = clf.predict(X)
conf_mat = confusion_matrix(y, y_pred)
bal_acc = 0.
for i in range(len(conf_mat)):
bal_acc += (float(conf_mat[i, i])) / np.sum(conf_mat[i])
bal_acc /= len(conf_mat)
return bal_acc
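# Worked illustration (added for clarity, not in the original module): for the confusion
# matrix [[9, 1], [3, 2]] the per-class recalls are 9/10 and 2/5, so the balanced accuracy
# returned above is (0.9 + 0.4) / 2 = 0.65, whereas plain accuracy would be 11/15 ~ 0.73.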
def compute_svm_cv(K, y, C=100.0, n_folds=5,
scoring=balanced_accuracy_scoring):
"""Compute cross-validated score of SVM with given precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds)
clf = SVC(C=C, kernel='precomputed', class_weight='auto')
scores = cross_val_score(clf, K, y,
scoring=scoring, cv=cv)
return scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
"""
"""
cv = KFold(len(K)/2, n_folds)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
        train_ids = np.concatenate((train, len(K) // 2 + train))
        test_ids = np.concatenate((test, len(K) // 2 + test))
clf = SVC(kernel='precomputed')
clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
return scores.mean()
def permutation_subjects(y):
"""Permute class labels of Contextual Disorder dataset.
"""
    y_perm = np.random.randint(0, 2, len(y) // 2)
y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
return y_perm
def permutation_subjects_ktst(y):
"""Permute class labels of Contextual Disorder dataset for KTST.
"""
    yp = np.random.randint(0, 2, len(y) // 2)
    yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
    y_perm = np.arange(len(y))
    for i in range(len(y) // 2):
        if yp[i] == 1:
            y_perm[i] = len(y) // 2 + i
            y_perm[len(y) // 2 + i] = i
return y_perm
def compute_svm_score_nestedCV(K, y, n_folds,
scoring=balanced_accuracy_scoring,
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 25)}]):
"""Compute cross-validated score of SVM using precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[train, :][:, train], y_train)
# print clf.best_params_
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
random_state=None):
"""
Compute the balanced accuracy, its null distribution and the p-value.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
    n_folds: int
        Number of folds in the stratified cross-validation
verbose: bool
Verbosity
Returns:
-------
acc: float
Average balanced accuracy.
acc_null: array
Null distribution of the balanced accuracy.
p_value: float
p-value
"""
# Computing the accuracy
param_grid = [{'C': np.logspace(-5, 5, 20)}]
if subjects:
acc = compute_svm_subjects(K, y, n_folds)
else:
acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
random_state=random_state)
if verbose:
print("Mean balanced accuracy = %s" % (acc))
print("Computing the null-distribution.")
# Computing the null-distribution
# acc_null = np.zeros(iterations)
# for i in range(iterations):
# if verbose and (i % 1000) == 0:
# print(i),
# stdout.flush()
# y_perm = np.random.permutation(y)
# acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
# param_grid=param_grid)
# if verbose:
# print ''
# Computing the null-distribution
if subjects:
yis = [permutation_subjects(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
else:
yis = [np.random.permutation(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
# acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
    acc_null = np.array(acc_null)  # Parallel returns a list; use an array for the comparison
    p_value = max(1.0 / iterations, (acc_null > acc).sum()
                  / float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
"""
Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
verbose: bool
Verbosity
Returns:
-------
mmd2u: float
MMD^2_u value.
    mmd2u_null: array
Null distribution of the MMD^2_u
p_value: float
p-value
"""
assert len(np.unique(y)) == 2, 'KTST only works on binary problems'
# Assuming that the first m rows of the kernel matrix are from one
# class and the other n rows from the second class.
m = len(y[y == 0])
n = len(y[y == 1])
mmd2u = MMD2u(K, m, n)
if verbose:
print("MMD^2_u = %s" % mmd2u)
print("Computing the null distribution.")
if subjects:
perms = [permutation_subjects_ktst(y) for i in range(iterations)]
mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
perms,
iterations)
else:
mmd2u_null = compute_null_distribution(K, m, n, iterations,
verbose=verbose)
p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
stats_name='$MMD^2_u$', save_figure=True):
"""Plot the observed value for the test statistic, its null
distribution and p-value.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
ax.plot(stats, prob.max()/30, 'w*', markersize=15,
markeredgecolor='k', markeredgewidth=2,
label="%s = %s" % (stats_name, stats))
ax.annotate('p-value: %s' % (p_value),
xy=(float(stats), prob.max()/9.), xycoords='data',
xytext=(-105, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="1."),
arrowprops={"arrowstyle": "->",
"connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
)
plt.xlabel(stats_name)
plt.ylabel('p(%s)' % stats_name)
plt.legend(numpoints=1)
plt.title('Data: %s' % data_name)
if save_figure:
save_dir = 'figures'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
fig.savefig(fig_name)
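# Hedged end-to-end sketch (not part of the original module): how the pieces above are
# typically chained on a small synthetic two-class dataset. Sample sizes, the mean shift
# and the number of permutation iterations are arbitrary choices for illustration only;
# the function is never called automatically.
def _synthetic_example(n_per_class=20, n_features=5, iterations=100):
    np.random.seed(0)
    X = np.vstack([np.random.randn(n_per_class, n_features),
                   np.random.randn(n_per_class, n_features) + 0.5])
    y = np.concatenate([np.zeros(n_per_class, dtype=int),
                        np.ones(n_per_class, dtype=int)])
    K = compute_rbf_kernel_matrix(X)  # RBF kernel with the median heuristic
    acc, _acc_null, p_clf = apply_svm(K, y, n_folds=5, iterations=iterations,
                                      verbose=False)
    mmd2u, _mmd2u_null, p_ktst = apply_ktst(K, y, iterations=iterations,
                                            verbose=False)
    return (acc, p_clf), (mmd2u, p_ktst)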
|
py | b40a22e7bd2b6a49c8f8a71c301bdb1db79e933a | from datetime import datetime
from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app
from flask_login import current_user, login_required
from flask_babel import _, get_locale
from guess_language import *
from app import db
from app.main.forms import EditProfileForm, PostForm, SearchForm
from app.models import User, Post
from app.translate import translate
from app.main import bp
@bp.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
g.search_form = SearchForm()
g.locale = str(get_locale())
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
form = PostForm()
if form.validate_on_submit():
language = guess_language(form.post.data)
if language == 'UNKNOWN' or len(language) > 5:
language = ''
post = Post(body=form.post.data, author=current_user,
language=language)
db.session.add(post)
db.session.commit()
flash(_('Your post is now live!'))
return redirect(url_for('main.index'))
page = request.args.get('page', 1, type=int)
posts = current_user.followed_posts().paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.index', page=posts.next_num) \
if posts.has_next else None
prev_url = url_for('main.index', page=posts.prev_num) \
if posts.has_prev else None
return render_template('index.html', title=_('Home'), form=form,
posts=posts.items, next_url=next_url,
prev_url=prev_url)
@bp.route('/explore')
@login_required
def explore():
page = request.args.get('page', 1, type=int)
posts = Post.query.order_by(Post.timestamp.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.explore', page=posts.next_num) \
if posts.has_next else None
prev_url = url_for('main.explore', page=posts.prev_num) \
if posts.has_prev else None
return render_template('index.html', title=_('Explore'),
posts=posts.items, next_url=next_url,
prev_url=prev_url)
@bp.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
posts = user.posts.order_by(Post.timestamp.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.user', username=user.username,
page=posts.next_num) if posts.has_next else None
prev_url = url_for('main.user', username=user.username,
page=posts.prev_num) if posts.has_prev else None
return render_template('user.html', user=user, posts=posts.items,
next_url=next_url, prev_url=prev_url)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash(_('Your changes have been saved.'))
return redirect(url_for('main.edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', title=_('Edit Profile'),
form=form)
@bp.route('/follow/<username>')
@login_required
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash(_('User %(username)s not found.', username=username))
return redirect(url_for('main.index'))
if user == current_user:
flash(_('You cannot follow yourself!'))
return redirect(url_for('main.user', username=username))
current_user.follow(user)
db.session.commit()
flash(_('You are following %(username)s!', username=username))
return redirect(url_for('main.user', username=username))
@bp.route('/unfollow/<username>')
@login_required
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash(_('User %(username)s not found.', username=username))
return redirect(url_for('main.index'))
if user == current_user:
flash(_('You cannot unfollow yourself!'))
return redirect(url_for('main.user', username=username))
current_user.unfollow(user)
db.session.commit()
flash(_('You are not following %(username)s.', username=username))
return redirect(url_for('main.user', username=username))
@bp.route('/translate', methods=['POST'])
@login_required
def translate_text():
return jsonify({'text': translate(request.form['text'],
request.form['source_language'],
request.form['dest_language'])})
@bp.route('/search')
@login_required
def search():
if not g.search_form.validate():
return redirect(url_for('main.explore'))
page = request.args.get('page', 1, type=int)
posts, total = Post.search(g.search_form.q.data, page,
current_app.config['POSTS_PER_PAGE'])
next_url = url_for('main.search', q=g.search_form.q.data, page=page + 1) \
if total > page * current_app.config['POSTS_PER_PAGE'] else None
prev_url = url_for('main.search', q=g.search_form.q.data, page=page - 1) \
if page > 1 else None
return render_template('search.html', title=_('Search'), posts=posts,
                           next_url=next_url, prev_url=prev_url)
|
py | b40a238caff36a5bb2855876ef9633afea181ecb | """
ModelLibrary
Ask for model using get. Handle loading, refresh...
"""
import collections
import os
import re
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import humanize
import pydantic
import redis
from asgiref.sync import AsyncToSync
from rich.console import Console
from rich.tree import Tree
from structlog import get_logger
import modelkit.assets
from modelkit.assets.manager import AssetsManager
from modelkit.assets.settings import AssetSpec
from modelkit.core import errors
from modelkit.core.model import Asset, AsyncModel, Model
from modelkit.core.model_configuration import ModelConfiguration, configure, list_assets
from modelkit.core.settings import LibrarySettings, NativeCacheSettings, RedisSettings
from modelkit.core.types import LibraryModelsType
from modelkit.utils.cache import Cache, NativeCache, RedisCache
from modelkit.utils.memory import PerformanceTracker
from modelkit.utils.pretty import describe
from modelkit.utils.redis import RedisCacheException
logger = get_logger(__name__)
class ConfigurationNotFoundException(Exception):
pass
T = TypeVar("T", bound=Model)
class AssetInfo(pydantic.BaseModel):
path: str
version: Optional[str]
class ModelLibrary:
def __init__(
self,
settings: Optional[Union[Dict, LibrarySettings]] = None,
assetsmanager_settings: Optional[dict] = None,
configuration: Optional[
Dict[str, Union[Dict[str, Any], ModelConfiguration]]
] = None,
models: Optional[LibraryModelsType] = None,
required_models: Optional[Union[List[str], Dict[str, Any]]] = None,
):
"""
Create a prediction service
:param models: a `Model` class, a module, or a list of either in which the
ModelLibrary will look for configurations.
:param configuration: used to override configurations obtained from `models`
:param required_models: used to restrict the models that are preloaded.
:type settings: dict of additional settings (lazy_loading, etc.)
:param assetsmanager_settings: settings passed to the AssetsManager
"""
if isinstance(settings, dict):
settings = LibrarySettings(**settings)
self.settings: LibrarySettings = settings or LibrarySettings()
self.assetsmanager_settings: Dict[str, Any] = assetsmanager_settings or {}
self._override_assets_manager: Optional[AssetsManager] = None
self._lazy_loading: bool = self.settings.lazy_loading
if models is None:
models = os.environ.get("MODELKIT_DEFAULT_PACKAGE")
self.configuration: Dict[str, ModelConfiguration] = configure(
models=models, configuration=configuration
)
self.models: Dict[str, Asset] = {}
self.assets_info: Dict[str, AssetInfo] = {}
self._assets_manager: Optional[AssetsManager] = None
required_models = (
required_models
if required_models is not None
else {r: {} for r in self.configuration}
)
if isinstance(required_models, list):
required_models = {r: {} for r in required_models}
self.required_models: Dict[str, Dict[str, Any]] = required_models
self.cache: Optional[Cache] = None
if self.settings.cache:
if isinstance(self.settings.cache, RedisSettings):
try:
self.cache = RedisCache(
self.settings.cache.host, self.settings.cache.port
)
except (ConnectionError, redis.ConnectionError):
logger.error(
"Cannot ping redis instance",
cache_host=self.settings.cache.host,
port=self.settings.cache.port,
)
raise RedisCacheException(
"Cannot ping redis instance"
f"[cache_host={self.settings.cache.host}, "
f"port={self.settings.cache.port}]"
)
if isinstance(self.settings.cache, NativeCacheSettings):
self.cache = NativeCache(
self.settings.cache.implementation, self.settings.cache.maxsize
)
if not self._lazy_loading:
self.preload()
@property
def assets_manager(self):
if self._assets_manager is None:
logger.info("Instantiating AssetsManager", lazy_loading=self._lazy_loading)
self._assets_manager = AssetsManager(**self.assetsmanager_settings)
return self._assets_manager
@property
def override_assets_manager(self):
if not self.settings.override_assets_dir:
return None
if self._override_assets_manager is None:
logger.info(
"Instantiating Override AssetsManager", lazy_loading=self._lazy_loading
)
self._override_assets_manager = AssetsManager(
assets_dir=self.settings.override_assets_dir
)
self._override_assets_manager.storage_provider = None
return self._override_assets_manager
def get(self, name, model_type: Optional[Type[T]] = None) -> T:
"""
Get a model by name
:param name: The name of the required model
:return: required model
"""
if self._lazy_loading:
# When in lazy mode ensure the model object and its dependencies
# are instantiated, this will download the asset
if name not in self.models:
self._load(name)
# Ensure that it is loaded
if not self.models[name]._loaded:
self.models[name].load()
if name not in self.models:
raise errors.ModelsNotFound(
f"Model `{name}` not loaded."
+ (
f" (loaded models: {', '.join(self.models)})."
if self.models
else "."
)
)
m = self.models[name]
if model_type and not isinstance(m, model_type):
raise ValueError(f"Model `{m}` is not an instance of {model_type}")
return cast(T, m)
def _load(self, model_name):
"""
This function loads a configured model by name.
"""
with PerformanceTracker() as m:
self._check_configurations(model_name)
self._resolve_assets(model_name)
self._load_model(model_name)
logger.info(
"Model and dependencies loaded",
name=model_name,
time=humanize.naturaldelta(m.time, minimum_unit="seconds"),
time_s=m.time,
memory=humanize.naturalsize(m.increment)
if m.increment is not None
else None,
memory_bytes=m.increment,
)
def _check_configurations(self, configuration_key):
if configuration_key not in self.configuration:
logger.error(
"Cannot find model configuration", name=configuration_key, sentry=True
)
candidates = {x: collections.Counter(x) for x in self.configuration}
configuration = collections.Counter(configuration_key)
differences = sorted(
(
sum(x for x in (configuration - candidate).values())
+ sum(x for x in (candidate - configuration).values()),
key,
)
for key, candidate in candidates.items()
)
msg = (
f"Cannot resolve assets for model `{configuration_key}`: "
"configuration not found.\n\n"
f"Configured models: {', '.join(sorted(self.configuration))}.\n\n"
)
if differences and differences[0] and differences[0][0] < 10:
msg += f'Did you mean "{differences[0][1]}" ?\n'
raise ConfigurationNotFoundException(msg)
configuration = self.configuration[configuration_key]
for dep_name in configuration.model_dependencies.values():
self._check_configurations(dep_name)
def _load_model(self, model_name, model_settings=None):
"""
This function loads dependent models for the current models, populating
the _models dictionary with the instantiated model objects.
"""
logger.debug("Loading model", model_name=model_name)
if model_name in self.models:
logger.debug("Model already loaded", model_name=model_name)
return
configuration = self.configuration[model_name]
# First, load dependent predictors and add them to the model
model_dependencies = {}
for dep_ref_name, dep_name in configuration.model_dependencies.items():
if dep_name not in self.models:
self._load_model(dep_name)
model_dependencies[dep_ref_name] = self.models[dep_name]
model_settings = {
**configuration.model_settings,
**self.required_models.get(model_name, {}),
}
logger.debug("Instantiating Model object", model_name=model_name)
self.models[model_name] = configuration.model_type(
asset_path=self.assets_info[configuration.asset].path
if configuration.asset
else "",
model_dependencies=model_dependencies,
service_settings=self.settings,
model_settings=model_settings or {},
configuration_key=model_name,
cache=self.cache,
)
logger.debug("Done loading Model", model_name=model_name)
def _resolve_assets(self, model_name):
"""
This function fetches assets for the current model and its dependent models
and populates the assets_info dictionary with the paths.
"""
logger.debug("Resolving asset for Model", model_name=model_name)
configuration = self.configuration[model_name]
# First, resolve assets from dependent models
for dep_name in configuration.model_dependencies.values():
self._resolve_assets(dep_name)
if not configuration.asset:
# If the model has no asset to load
return
model_settings = {
**configuration.model_settings,
**self.required_models.get(model_name, {}),
}
# If the asset is overriden in the model_settings
if "asset_path" in model_settings:
asset_path = model_settings.pop("asset_path")
logger.debug(
"Overriding asset from Model settings",
model_name=model_name,
asset_path=asset_path,
)
self.assets_info[configuration.asset] = AssetInfo(path=asset_path)
asset_spec = AssetSpec.from_string(configuration.asset)
# If the model's asset is overriden with environment variables
venv = "MODELKIT_{}_FILE".format(
re.sub(r"[\/\-\.]+", "_", asset_spec.name).upper()
)
local_file = os.environ.get(venv)
if local_file:
logger.debug(
"Overriding asset from environment variable",
asset_name=asset_spec.name,
path=local_file,
)
self.assets_info[configuration.asset] = AssetInfo(path=local_file)
# The assets should be retrieved
# possibly override version
venv = "MODELKIT_{}_VERSION".format(
re.sub(r"[\/\-\.]+", "_", asset_spec.name).upper()
)
version = os.environ.get(venv)
if version:
logger.debug(
"Overriding asset version from environment variable",
asset_name=asset_spec.name,
                version=version,
)
asset_spec = AssetSpec.from_string(asset_spec.name + ":" + version)
if self.override_assets_manager:
try:
self.assets_info[configuration.asset] = AssetInfo(
**self.override_assets_manager.fetch_asset(
spec=AssetSpec(
name=asset_spec.name, sub_part=asset_spec.sub_part
),
return_info=True,
)
)
logger.debug(
"Asset has been overriden",
name=asset_spec.name,
)
except modelkit.assets.errors.AssetDoesNotExistError:
logger.debug(
"Asset not found in overriden prefix",
name=asset_spec.name,
)
if configuration.asset not in self.assets_info:
self.assets_info[configuration.asset] = AssetInfo(
**self.assets_manager.fetch_asset(asset_spec, return_info=True)
)
def preload(self):
# make sure the assets_manager is instantiated
self.assets_manager
for model_name in self.required_models:
self._load(model_name)
def close(self):
for model in self.models.values():
if isinstance(model, Model):
model.close()
if isinstance(model, AsyncModel):
AsyncToSync(model.close)()
async def aclose(self):
for model in self.models.values():
if isinstance(model, Model):
model.close()
if isinstance(model, AsyncModel):
await model.close()
def describe(self, console=None) -> None:
if not console:
console = Console()
t = Tree("[bold]Settings")
console.print(describe(self.settings, t=t))
t = Tree("[bold]Configuration")
console.print(describe(self.configuration, t=t))
t = Tree("[bold]Assets")
if not self.assets_info:
t.add("[dim][italic]No assets loaded")
else:
describe(self.assets_info, t=t)
console.print(t)
t = Tree("[bold]Models")
if not self.models:
t.add("[dim][italic]No models loaded")
else:
describe(self.models, t=t)
console.print(t)
def load_model(
model_name,
configuration: Optional[
Dict[str, Union[Dict[str, Any], ModelConfiguration]]
] = None,
models: Optional[LibraryModelsType] = None,
model_type: Optional[Type[T]] = None,
) -> T:
"""
    Loads a modelkit model without the need for a ModelLibrary.
This is useful for development, and should be avoided in production
code.
"""
lib = ModelLibrary(
required_models=[model_name],
models=models,
configuration=configuration,
settings={"lazy_loading": True},
)
return lib.get(model_name, model_type=model_type)
def download_assets(
assetsmanager_settings: Optional[dict] = None,
configuration: Optional[
Mapping[str, Union[Dict[str, Any], ModelConfiguration]]
] = None,
models: Optional[LibraryModelsType] = None,
required_models: Optional[List[str]] = None,
) -> Tuple[Dict[str, Set[str]], Dict[str, AssetInfo]]:
assetsmanager_settings = assetsmanager_settings or {}
assets_manager = AssetsManager(**assetsmanager_settings)
configuration = configure(models=models, configuration=configuration)
models_assets = {}
assets_info = {}
required_models = required_models or [r for r in configuration]
for model in required_models:
models_assets[model] = list_assets(
required_models=[model], configuration=configuration
)
for asset in models_assets[model]:
if asset in assets_info:
continue
assets_info[asset] = AssetInfo(
**assets_manager.fetch_asset(
asset,
return_info=True,
)
)
return models_assets, assets_info
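# Hedged usage sketch (not part of modelkit itself): the two typical entry points of this
# module. The package path "my_package.models" and the model name "my_model" are purely
# illustrative assumptions and must match models/configurations defined in your project;
# the helper is never called automatically.
def _example_usage():
    # Development shortcut: lazily load a single model without building a full library.
    model = load_model("my_model", models="my_package.models")
    # Library-style usage: build a ModelLibrary restricted to the required models.
    library = ModelLibrary(models="my_package.models", required_models=["my_model"])
    return model, library.get("my_model")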
|
py | b40a23bb031cb391ff6011d57fe91c8164b5c053 |
class LogEventArgs:
ScriptContainer = None
ScriptHash = None
Message = None
def __init__(self, container, script_hash, message):
self.ScriptContainer = container
self.ScriptHash = script_hash
self.Message = message
|
py | b40a23d221642fe87f354e4ee321e72455b1da53 | # Plot theoretical rates of convergence
import numpy as np
import matplotlib.pyplot as plt
import os
def save_fig(fname):
figdir = os.path.join(os.environ["PYPROBML"], "figures")
plt.tight_layout()
fullname = os.path.join(figdir, fname)
print('saving to {}'.format(fullname))
plt.savefig(fullname)
plt.figure(figsize=(12,4))
ks = range(1,10)
ys = [1.0/k for k in ks]
print(ys)
plt.subplot(1,3,1)
plt.plot(ks, np.log(ys), color = 'r')
plt.title('Sublinear convergence')
ys = [1.0/(2**k) for k in ks]
print(ys)
plt.subplot(1,3,2)
plt.plot(ks, np.log(ys), color = 'g')
plt.title('Linear convergence')
ys = [1.0/(2**(2**k)) for k in ks]
print(ys)
plt.subplot(1,3,3)
plt.plot(ks, np.log(ys), color = 'b')
plt.title('Quadratic convergence')
#fig.subplots_adjust(hspace=0)
plt.tight_layout()
plt.draw()
fname = 'convergenceRates.pdf'
print(fname)
save_fig(fname)
plt.show()
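# For reference, the three panels correspond to the standard definitions:
# sublinear convergence, error_k ~ 1/k, so log(error) decays like -log(k);
# linear convergence, error_k ~ rho**k (here rho = 1/2), so log(error) is linear in k;
# quadratic convergence, error_k ~ rho**(2**k), so the number of correct digits roughly
# doubles at every iteration.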
|
py | b40a23fb4556ab660ee7c135a60562966561cede | """
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
from numpy import nan, ndarray
import numpy as np
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas import compat, lib
from pandas.compat import range
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
import pandas.index as _index
import pandas.core.ops as ops
def _arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: %d vs. %d" %
(len(self), len(other)))
if not isinstance(other, com.ABCSparseArray):
other = SparseArray(other, fill_value=self.fill_value)
if name[0] == 'r':
return _sparse_array_op(other, self, op, name[1:])
else:
return _sparse_array_op(self, other, op, name)
elif np.isscalar(other):
new_fill_value = op(np.float64(self.fill_value),
np.float64(other))
return SparseArray(op(self.sp_values, other),
sparse_index=self.sp_index,
fill_value=new_fill_value)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
def _sparse_array_op(left, right, op, name):
if np.isnan(left.fill_value):
sparse_op = lambda a, b: _sparse_nanop(a, b, name)
else:
sparse_op = lambda a, b: _sparse_fillop(a, b, name)
if left.sp_index.equals(right.sp_index):
result = op(left.sp_values, right.sp_values)
result_index = left.sp_index
else:
result, result_index = sparse_op(left, right)
try:
fill_value = op(left.fill_value, right.fill_value)
except:
fill_value = nan
return SparseArray(result, sparse_index=result_index,
fill_value=fill_value)
def _sparse_nanop(this, other, name):
sparse_op = getattr(splib, 'sparse_nan%s' % name)
result, result_index = sparse_op(this.sp_values,
this.sp_index,
other.sp_values,
other.sp_index)
return result, result_index
def _sparse_fillop(this, other, name):
sparse_op = getattr(splib, 'sparse_%s' % name)
result, result_index = sparse_op(this.sp_values,
this.sp_index,
this.fill_value,
other.sp_values,
other.sp_index,
other.fill_value)
return result, result_index
class SparseArray(PandasObject, np.ndarray):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Defaults to NaN (code for missing)
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseArray objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(
cls, data, sparse_index=None, index=None, kind='integer', fill_value=None,
dtype=np.float64, copy=False):
if index is not None:
if data is None:
data = np.nan
if not np.isscalar(data):
raise Exception("must only pass scalars with an index ")
values = np.empty(len(index), dtype='float64')
values.fill(data)
data = values
if dtype is not None:
dtype = np.dtype(dtype)
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
sparse_index = data.sp_index
values = np.asarray(data)
else:
# array-like
if sparse_index is None:
values, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
else:
values = data
if len(values) != sparse_index.npoints:
raise AssertionError("Non array-like type {0} must have"
" the same length as the"
" index".format(type(values)))
# Create array, do *not* copy data by default
if copy:
subarr = np.array(values, dtype=dtype, copy=True)
else:
subarr = np.asarray(values, dtype=dtype)
# if we have a bool type, make sure that we have a bool fill_value
if (dtype is not None and issubclass(dtype.type, np.bool_)) or (data is not None and lib.is_bool_array(subarr)):
if np.isnan(fill_value) or not fill_value:
fill_value = False
else:
fill_value = bool(fill_value)
# Change the class of the array to be the subclass type.
output = subarr.view(cls)
output.sp_index = sparse_index
output.fill_value = fill_value
return output
@property
def _constructor(self):
return lambda x: SparseArray(x, fill_value=self.fill_value,
kind=self.kind)
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.sp_index = getattr(obj, 'sp_index', None)
self.fill_value = getattr(obj, 'fill_value', None)
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
fill_value, sp_index = own_state[:2]
self.sp_index = sp_index
self.fill_value = fill_value
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def __unicode__(self):
return '%s\nFill: %s\n%s' % (com.pprint_thing(self),
com.pprint_thing(self.fill_value),
com.pprint_thing(self.sp_index))
def disable(self, other):
raise NotImplementedError('inplace binary ops not supported')
# Inplace operators
__iadd__ = disable
__isub__ = disable
__imul__ = disable
__itruediv__ = disable
__ifloordiv__ = disable
__ipow__ = disable
# Python 2 division operators
if not compat.PY3:
__idiv__ = disable
@property
def values(self):
"""
Dense values
"""
output = np.empty(len(self), dtype=np.float64)
int_index = self.sp_index.to_int_index()
output.fill(self.fill_value)
output.put(int_index.indices, self)
return output
@property
def sp_values(self):
# caching not an option, leaks memory
return self.view(np.ndarray)
def get_values(self, fill=None):
""" return a dense representation """
return self.to_dense(fill=fill)
def to_dense(self, fill=None):
"""
Convert SparseSeries to (dense) Series
"""
values = self.values
# fill the nans
if fill is None:
fill = self.fill_value
if not np.isnan(fill):
values[np.isnan(values)] = fill
return values
def __iter__(self):
for i in range(len(self)):
yield self._get_val_at(i)
def __getitem__(self, key):
"""
"""
if com.is_integer(key):
return self._get_val_at(key)
else:
if isinstance(key, SparseArray):
key = np.asarray(key)
if hasattr(key,'__len__') and len(self) != len(key):
indices = self.sp_index
if hasattr(indices,'to_int_index'):
indices = indices.to_int_index()
data_slice = self.values.take(indices.indices)[key]
else:
data_slice = self.values[key]
return self._constructor(data_slice)
def __getslice__(self, i, j):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
return self.__getitem__(slobj)
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return _index.get_value_at(self, sp_loc)
def take(self, indices, axis=0):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
if axis:
raise ValueError("axis must be 0, input was {0}".format(axis))
indices = np.atleast_1d(np.asarray(indices, dtype=int))
# allow -1 to indicate missing values
n = len(self)
if ((indices >= n) | (indices < -1)).any():
raise IndexError('out of bounds access')
if self.sp_index.npoints > 0:
locs = np.array([self.sp_index.lookup(loc) if loc > -1 else -1
for loc in indices])
result = self.sp_values.take(locs)
mask = locs == -1
if mask.any():
try:
result[mask] = self.fill_value
except ValueError:
# wrong dtype
result = result.astype('float64')
result[mask] = self.fill_value
else:
result = np.empty(len(indices))
result.fill(self.fill_value)
return result
def __setitem__(self, key, value):
# if com.is_integer(key):
# self.values[key] = value
# else:
# raise Exception("SparseArray does not support seting non-scalars via setitem")
raise TypeError(
"SparseArray does not support item assignment via setitem")
def __setslice__(self, i, j, value):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
# if not np.isscalar(value):
# raise Exception("SparseArray does not support seting non-scalars via slices")
#x = self.values
#x[slobj] = value
#self.values = x
raise TypeError(
"SparseArray does not support item assignment via slices")
def astype(self, dtype=None):
"""
"""
dtype = np.dtype(dtype)
if dtype is not None and dtype not in (np.float_, float):
raise TypeError('Can only support floating point data for now')
return self.copy()
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
if deep:
values = self.sp_values.copy()
else:
values = self.sp_values
return SparseArray(values, sparse_index=self.sp_index,
dtype=self.dtype,
fill_value=self.fill_value)
def count(self):
"""
Compute sum of non-NA/null observations in SparseSeries. If the
fill_value is not NaN, the "sparse" locations will be included in the
observation count
Returns
-------
nobs : int
"""
sp_values = self.sp_values
valid_spvals = np.isfinite(sp_values).sum()
if self._null_fill_value:
return valid_spvals
else:
return valid_spvals + self.sp_index.ngaps
@property
def _null_fill_value(self):
return np.isnan(self.fill_value)
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = np.isfinite(sp_vals)
return sp_vals[mask]
def sum(self, axis=None, dtype=None, out=None):
"""
Sum of non-NA/null values
Returns
-------
sum : float
"""
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
if self._null_fill_value:
return sp_sum
else:
nsparse = self.sp_index.ngaps
return sp_sum + self.fill_value * nsparse
def cumsum(self, axis=0, dtype=None, out=None):
"""
Cumulative sum of values. Preserves locations of NaN values
Extra parameters are to preserve ndarray interface.
Returns
-------
cumsum : Series
"""
if com.notnull(self.fill_value):
return self.to_dense().cumsum()
# TODO: what if sp_values contains NaN??
return SparseArray(self.sp_values.cumsum(),
sparse_index=self.sp_index,
fill_value=self.fill_value)
def mean(self, axis=None, dtype=None, out=None):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
ct = len(valid_vals)
if self._null_fill_value:
return sp_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
def _maybe_to_dense(obj):
""" try to convert to dense """
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
if isinstance(array, com.ABCSparseSeries):
array = SparseArray(
array.values, sparse_index=array.sp_index, fill_value=array.fill_value, copy=True)
if not isinstance(array, SparseArray):
array = com._values_from_object(array)
return array
def make_sparse(arr, kind='block', fill_value=nan):
"""
Convert ndarray to sparse format
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
Returns
-------
(sparse_values, index) : (ndarray, SparseIndex)
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
if np.isscalar(arr):
arr = [arr]
arr = np.asarray(arr)
length = len(arr)
if np.isnan(fill_value):
mask = ~np.isnan(arr)
else:
mask = arr != fill_value
length = len(arr)
if length != mask.size:
# the arr is a SparseArray
indices = mask.sp_index.indices
else:
indices = np.arange(length, dtype=np.int32)[mask]
if kind == 'block':
locs, lens = splib.get_blocks(indices)
index = BlockIndex(length, locs, lens)
elif kind == 'integer':
index = IntIndex(length, indices)
else: # pragma: no cover
raise ValueError('must be block or integer type')
sparsified_values = arr[mask]
return sparsified_values, index
ops.add_special_arithmetic_methods(SparseArray,
arith_method=_arith_method,
use_numexpr=False)
def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an sparse/dense array of arrays each of which is a single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
def convert_sparse(x, axis):
# coerce to native type
if isinstance(x, SparseArray):
x = x.get_values()
x = x.ravel()
if axis > 0:
x = np.atleast_2d(x)
return x
typs = com.get_dtype_kinds(to_concat)
# we have more than one type here, so densify and regular concat
to_concat = [ convert_sparse(x, axis) for x in to_concat ]
result = np.concatenate(to_concat,axis=axis)
if not len(typs-set(['sparse','f','i'])):
# we can remain sparse
result = SparseArray(result.ravel())
else:
# coerce to object if needed
result = result.astype('object')
return result
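# Hedged illustration (not part of pandas): a minimal round trip through the structures
# defined above, showing how make_sparse factors an array into its non-fill values plus a
# SparseIndex, and how SparseArray converts back to dense. Values are arbitrary; the helper
# is never called automatically.
def _example_sparse_roundtrip():
    dense = np.array([1.0, nan, nan, 3.0, nan])
    values, index = make_sparse(dense, kind='integer')
    # values holds the two non-NaN entries; index records their positions (0 and 3)
    sparr = SparseArray(dense, kind='block')
    return values, index, sparr.to_dense()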
|
py | b40a24219e4f05dd3294262f8ead7199938fb802 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribePropertyRequest(JDCloudRequest):
"""
    API for querying device properties
"""
def __init__(self, parameters, header=None, version="v2"):
super(DescribePropertyRequest, self).__init__(
'/regions/{regionId}/coreinstances/{instanceId}/property:describe', 'GET', header, version)
self.parameters = parameters
class DescribePropertyParameters(object):
def __init__(self, regionId, instanceId, deviceId):
"""
        :param regionId: Region ID
        :param instanceId: Instance ID
        :param deviceId: Device ID
"""
self.regionId = regionId
self.instanceId = instanceId
self.deviceId = deviceId
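# Hedged usage sketch (not emitted by the JD Cloud code generator): constructing the request
# defined above. The region, instance and device identifiers are made-up placeholders, and
# actually sending the request requires the SDK's client and credentials, which are outside
# the scope of this file.
def _example_build_request():
    parameters = DescribePropertyParameters('cn-north-1', 'my-instance-id', 'my-device-id')
    return DescribePropertyRequest(parameters)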
|
py | b40a24b1b84590432a339ee0e8fac4f84e897ac1 | import numpy as np
import gzip
import pickle
import os
import urllib.request
class MNIST:
host = 'http://yann.lecun.com/exdb/mnist/'
filenames = {
'train': ('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz'),
'test': ('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'),
}
dataset_filename = 'mnist.pkl.gz'
train_samples = 50000
validation_samples = 10000
test_samples = 10000
def __init__(self):
self.current_dir = os.path.dirname(__file__)
if not self.is_dataset_available():
            print('Dataset not available! It will be downloaded and decoded, and this can take a while, please wait!')
datasets = self.get_base_datasets_filenames()
for dataset in datasets:
if not self.is_base_dataset_downloaded(dataset):
print(f'Downloading {dataset}...')
self.download_dataset(dataset)
print('Decoding files and saving it...')
self.decode_and_save()
print('Deleting base files (downloaded)...')
for dataset in datasets:
self.delete_dataset(dataset)
print('Done.')
def is_dataset_available(self):
return os.path.exists(os.path.join(self.current_dir, self.dataset_filename))
def get_base_datasets_filenames(self):
return self.filenames['train'] + self.filenames['test']
def is_base_dataset_downloaded(self, filename):
return os.path.exists(os.path.join(self.current_dir, filename))
def download_dataset(self, filename):
url = self.host + filename
dest = os.path.join(self.current_dir, filename)
urllib.request.urlretrieve(url, dest)
def delete_dataset(self, filename):
os.remove(os.path.join(self.current_dir, filename))
def decode_and_save(self):
data = {}
for key, (images_filename, labels_filename) in self.filenames.items():
with gzip.open(os.path.join(self.current_dir, images_filename), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16).reshape(-1, 28*28)
with gzip.open(os.path.join(self.current_dir, labels_filename), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
data[key] = (images, labels)
training = tuple(x[:self.train_samples] for x in data['train'])
validation = tuple(x[self.train_samples:] for x in data['train'])
test = tuple(data['test'])
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'wb') as file:
pickle.dump((training, validation, test), file)
def load(self):
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'rb') as file:
training, validation, test = pickle.load(file)
return training, validation, test
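# Hedged usage sketch: exercising the loader above. Running this file directly downloads and
# decodes the MNIST archives on first use (roughly 11 MB), so the example sits behind a
# __main__ guard and does nothing on import.
if __name__ == '__main__':
    mnist = MNIST()
    (train_x, train_y), (val_x, val_y), (test_x, test_y) = mnist.load()
    print(train_x.shape, val_x.shape, test_x.shape)  # (50000, 784) (10000, 784) (10000, 784)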
|
py | b40a253cba42d9cb0f0c322af1ef5601c5edc604 | import PIL
from PIL import Image, ImageTk
import tkinter as tk
import argparse
import datetime
import cv2
import os
class Application:
def __init__(self, output_path = "./"):
""" Initialize application which uses OpenCV + Tkinter. It displays
a video stream in a Tkinter window and stores current snapshot on disk """
self.vs = cv2.VideoCapture('Kaabil Hoon (Kaabil) Hrithik Roshan (2K Ultra HD 1440p)-(HDLoft.Com).mp4') # capture video frames, 0 is your default video camera
self.output_path = output_path # store output path
self.current_image = None # current image from the camera
self.root = tk.Tk() # initialize root window
self.root.title("PyImageSearch PhotoBooth") # set window title
# self.destructor function gets fired when the window is closed
self.root.protocol('WM_DELETE_WINDOW', self.destructor)
self.panel = tk.Label(self.root) # initialize image panel
self.panel.pack(padx=10, pady=10)
self.root.config(cursor="arrow")
# create a button, that when pressed, will take the current frame and save it to file
btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
btn.pack(fill="both", expand=True, padx=10, pady=10)
# start a self.video_loop that constantly pools the video sensor
# for the most recently read frame
self.video_loop()
def video_loop(self):
""" Get frame from the video stream and show it in Tkinter """
ok, frame = self.vs.read() # read frame from video stream
# frame = cv2.resize(frame, (1500,1000))
if ok: # frame captured without any errors
key = cv2.waitKey(1000)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA
self.current_image = Image.fromarray(cv2image) # convert image for PIL
#self.current_image= self.current_image.resize([1280,1024],PIL.Image.ANTIALIAS)
imgtk = ImageTk.PhotoImage(image=self.current_image) # convert image for tkinter
self.panel.imgtk = imgtk # anchor imgtk so it does not be deleted by garbage-collector
self.panel.config(image=imgtk) # show the image
#self.root.attributes("-fullscreen",True)
self.root.after(1, self.video_loop) # call the same function after 30 milliseconds
    def take_snapshot(self):
        """ Take snapshot and save it to the output path (the original method was truncated
        here; this is a minimal reconstruction and the filename scheme is an assumption) """
        if self.current_image is not None:
            ts = datetime.datetime.now()  # grab the current timestamp
            filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
            path = os.path.join(self.output_path, filename)
            self.current_image.save(path, "JPEG")  # save the current PIL image as a JPEG file
            print("[INFO] saved {}".format(filename))

    def destructor(self):
        """ Destroy the root window and release resources (referenced by the
        WM_DELETE_WINDOW protocol above but missing from this excerpt) """
        self.root.destroy()
        self.vs.release()  # release the video stream
        cv2.destroyAllWindows()  # close any open OpenCV windows

a = Application()
a.root.mainloop()  # start the GUI event loop so video_loop keeps updating the panel
|
py | b40a26011e2971af73635b4dc81d8feb487212df | import uuid
from common.database import Database
from common.utils import Utils
import models.users.errors as UserErrors
class User(object):
def __init__(self, email, password, _id=None):
self.email = email
self.password = password
self._id = uuid.uuid4().hex if _id is None else _id
def __repr__(self):
return "<User {}>".format(self.email)
@staticmethod
def is_login_valid(email, password):
#This method verifies that the email/password combo sent by the site forms is valid or not
#param email: user's email
#parma password: a sha512 hashed password
user_data = Database.find_one("users", {"email":email}) #password in pbkdf2_sha512 (hashed twice)
if user_data is None:
#tell the user that thei email does not exist
raise UserErrors.UserNotExistsError("Your user does not exist!")
if not Utils.check_hashed_password(password, user_data['password']):
#tell the user that their password is wrong
raise UserErrors.IncorrectPasswordError("Your password is incorrect")
return True
@staticmethod
def register_user(email, password):
#This method registers a user using email and password. The password already comes as hashed sha512
#param email: user's email
#param password: sha-512 hashed password
#returns True if registered successfully, and False otherwise(exceptions can also be raised)
user_data = Database.find_one("users", {"email":email})
if user_data is not None:
#Tell user they are already registered
raise UserErrors.UserAlreadyRegisteredError("The email you used to register already exists.")
if not Utils.email_is_valid(email):
#Tell the user that their email is not constructed properly
raise UserErrors.InvalidEmailError("The email does not have the right format.")
User(email, Utils.hash_password(password)).save_to_db()
return True
def save_to_db(self):
Database.insert("users", self.json())
def json(self):
return {
"_id":self._id,
"email":self.email,
"password":self.password
}
|
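A minimal usage sketch for the model above, assuming the site has already hashed the password with sha512 before calling the server (as the docstrings state) and that the Database connection is initialized elsewhere; the import path for User is hypothetical.

import hashlib

import models.users.errors as UserErrors
from models.users.user import User  # hypothetical module path for the class above

def demo_register_and_login():
    email = "alice@example.com"
    sha512_password = hashlib.sha512("s3cret".encode("utf-8")).hexdigest()  # browser-side hash; server re-hashes with pbkdf2
    try:
        User.register_user(email, sha512_password)
    except UserErrors.UserAlreadyRegisteredError as e:
        print(e)
    try:
        if User.is_login_valid(email, sha512_password):
            print("Logged in as", email)
    except (UserErrors.UserNotExistsError, UserErrors.IncorrectPasswordError) as e:
        print(e)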
py | b40a2682381ad50da67fe7499b75f4f862e00b3d | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(dense.units, 2)
self.assertEqual(dense.activation, nn_ops.relu)
self.assertEqual(dense.kernel_regularizer, None)
self.assertEqual(dense.bias_regularizer, None)
self.assertEqual(dense.activity_regularizer, None)
self.assertEqual(dense.use_bias, True)
# Test auto-naming
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_1')
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_2')
@test_util.run_deprecated_v1
def testVariableInput(self):
with self.cached_session():
v = variable_scope.get_variable(
'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
x = core_layers.Dense(1)(v)
variables.global_variables_initializer().run()
self.assertAllEqual(x.eval(), [[0.0]])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testCall(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 2], outputs.get_shape().as_list())
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias.name, 'my_dense/bias:0')
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoEagerLeak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = random_ops.random_uniform((5, 4), seed=1)
core_layers.Dense(5)(inputs)
core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)
@test_util.run_in_graph_and_eager_modes
def testCallTensorDot(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
@test_util.run_in_graph_and_eager_modes
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
@test_util.run_in_graph_and_eager_modes
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = random_ops.random_uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
@test_util.run_deprecated_v1
def testCallOnPlaceHolder(self):
inputs = array_ops.placeholder(dtype=dtypes.float32)
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
@test_util.run_in_graph_and_eager_modes
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense1/Relu')
dense = core_layers.Dense(2, name='dense2')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
@test_util.run_deprecated_v1
def testActivityRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', activity_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
@test_util.run_deprecated_v1
def testKernelRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', kernel_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
@test_util.run_deprecated_v1
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
@test_util.run_deprecated_v1
def testFunctionalDense(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
@test_util.run_deprecated_v1
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = _get_variable_dict_from_varstore().values()
core_layers.dense(inputs, 2)
vars2 = _get_variable_dict_from_varstore().values()
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuse(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2, name='my_dense', reuse=True)
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuseFromScope(self):
with self.cached_session():
with variable_scope.variable_scope('scope'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
with variable_scope.variable_scope('scope', reuse=True):
core_layers.dense(inputs, 2, name='my_dense')
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
@test_util.run_deprecated_v1
def testFunctionalDenseInitializerFromScope(self):
with variable_scope.variable_scope(
'scope',
initializer=init_ops.ones_initializer()), self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
variables.global_variables_initializer().run()
weights = _get_variable_dict_from_varstore()
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from scope).
self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
np.ones((3, 2)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
np.zeros((2)))
def testEagerExecution(self):
with context.eager_mode():
container = variable_scope.EagerVariableStore()
x = constant_op.constant([[2.0]])
with container.as_default():
y = core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertAllEqual(y, [[2.0]])
self.assertEqual(len(container.variables()), 2)
# Recreate the layer to test reuse.
with container.as_default():
core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertEqual(len(container.variables()), 2)
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
@test_util.run_deprecated_v1
def testFunctionalDenseInScope(self):
with self.cached_session():
with variable_scope.variable_scope('test'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
var_dict = _get_variable_dict_from_varstore()
var_key = 'test/my_dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test1') as scope:
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name=scope)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test1/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test2'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test2/dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
@test_util.run_in_graph_and_eager_modes
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
ts = tensor_shape.TensorShape
# pylint: disable=protected-access
with self.assertRaises(ValueError):
dense.compute_output_shape(ts(None))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([]))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([1]))
self.assertEqual(
[None, 2],
dense.compute_output_shape((None, 3)).as_list())
self.assertEqual(
[None, 2],
dense.compute_output_shape(ts([None, 3])).as_list())
self.assertEqual(
[None, 4, 2],
dense.compute_output_shape(ts([None, 4, 3])).as_list())
# pylint: enable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
dense = core_layers.Dense(2,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3), seed=1)
dense(inputs)
self.assertEqual(dense.kernel_constraint, k_constraint)
self.assertEqual(dense.bias_constraint, b_constraint)
def _get_variable_dict_from_varstore():
var_dict = variable_scope._get_default_variable_store()._vars # pylint: disable=protected-access
sorted_var_dict = collections.OrderedDict(
sorted(var_dict.items(), key=lambda t: t[0]))
return sorted_var_dict
class DropoutTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDropoutProperties(self):
dp = core_layers.Dropout(0.5, name='dropout')
self.assertEqual(dp.rate, 0.5)
self.assertEqual(dp.noise_shape, None)
dp.apply(array_ops.ones(()))
self.assertEqual(dp.name, 'dropout')
@test_util.run_in_graph_and_eager_modes
def testBooleanLearningPhase(self):
dp = core_layers.Dropout(0.5)
inputs = array_ops.ones((5, 3))
dropped = dp.apply(inputs, training=True)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = dp.apply(inputs, training=False)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 3)), np_output)
@test_util.run_deprecated_v1
def testDynamicLearningPhase(self):
with self.cached_session() as sess:
dp = core_layers.Dropout(0.5, seed=1)
inputs = array_ops.ones((5, 5))
training = array_ops.placeholder(dtype='bool')
dropped = dp.apply(inputs, training=training)
self.evaluate(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={training: True})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
@test_util.run_in_graph_and_eager_modes
def testDynamicNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [None, 1, None]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
def testCustomNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [5, 1, 2]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
@test_util.run_deprecated_v1
def testFunctionalDropout(self):
with self.cached_session():
inputs = array_ops.ones((5, 5))
dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
variables.global_variables_initializer().run()
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 5)), np_output)
@test_util.run_deprecated_v1
def testDynamicRate(self):
with self.cached_session() as sess:
rate = array_ops.placeholder(dtype='float32', name='rate')
dp = core_layers.Dropout(rate, name='dropout')
inputs = array_ops.ones((5, 5))
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={rate: 0.5})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={rate: 0.0})
self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(test.TestCase):
@test_util.run_deprecated_v1
def testCreateFlatten(self):
with self.cached_session() as sess:
x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
self.assertEqual(list(np_output.shape), [3, 6])
self.assertEqual(y.get_shape().as_list(), [None, 6])
x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
self.assertEqual(list(np_output.shape), [1, 12])
self.assertEqual(y.get_shape().as_list(), [1, 12])
def testComputeShape(self):
shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
self.assertEqual(shape.as_list(), [1, 12])
shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
self.assertEqual(shape.as_list(), [None, 6])
shape = core_layers.Flatten().compute_output_shape((None, 3, None))
self.assertEqual(shape.as_list(), [None, None])
@test_util.run_deprecated_v1
def testDataFormat5d(self):
np_input_channels_last = np.arange(
120, dtype='float32').reshape([1, 5, 4, 3, 2])
with self.test_session() as sess:
x = array_ops.placeholder(shape=(1, 5, 4, 3, 2), dtype='float32')
y = core_layers.Flatten(data_format='channels_last')(x)
np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})
x = array_ops.placeholder(shape=(1, 2, 5, 4, 3), dtype='float32')
y = core_layers.Flatten(data_format='channels_first')(x)
np_input_channels_first = np.transpose(np_input_channels_last,
[0, 4, 1, 2, 3])
np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})
self.assertAllEqual(np_output_cl, np_output_cf)
@test_util.run_deprecated_v1
def testDataFormat4d(self):
np_input_channels_last = np.arange(
24, dtype='float32').reshape([1, 4, 3, 2])
with self.test_session() as sess:
x = array_ops.placeholder(shape=(1, 4, 3, 2), dtype='float32')
y = core_layers.Flatten(data_format='channels_last')(x)
np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})
x = array_ops.placeholder(shape=(1, 2, 4, 3), dtype='float32')
y = core_layers.Flatten(data_format='channels_first')(x)
np_input_channels_first = np.transpose(np_input_channels_last,
[0, 3, 1, 2])
np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})
self.assertAllEqual(np_output_cl, np_output_cf)
@test_util.run_deprecated_v1
def testFunctionalFlatten(self):
x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
y = core_layers.flatten(x, name='flatten')
self.assertEqual(y.get_shape().as_list(), [None, 6])
@test_util.run_deprecated_v1
def testFlatten0D(self):
x = array_ops.placeholder(shape=(None,), dtype='float32')
y = core_layers.Flatten()(x)
with self.cached_session() as sess:
np_output = sess.run(y, feed_dict={x: np.zeros((5,))})
self.assertEqual(list(np_output.shape), [5, 1])
self.assertEqual(y.shape.as_list(), [None, 1])
@test_util.run_deprecated_v1
def testFlattenUnknownAxes(self):
with self.cached_session() as sess:
x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
if __name__ == '__main__':
test.main()
|
py | b40a26eb94b14d4c68f173b2b305c7f158bb6b14 | import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOEcalRecalElectronHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, # choose logical OR between Triggerbits
eventSetupPathsKey = 'EcalRecalElectron',
throw = False # do not throw if the trigger paths listed above are unavailable
)
from Configuration.StandardSequences.Reconstruction_Data_cff import ecalLocalRecoSequence, pfClusteringPS, pfClusteringECAL, ecalClusters
from Calibration.EcalAlCaRecoProducers.ALCARECOEcalCalIsolElectron_cff import *
#ecal rechits
from RecoLocalCalo.Configuration.RecoLocalCalo_cff import ecalLocalRecoSequence
recoECALSeq = cms.Sequence( ecalLocalRecoSequence)
from RecoParticleFlow.PFClusterProducer.particleFlowCluster_cff import *
rerecoPFClusteringSeq = cms.Sequence(pfClusteringPS + pfClusteringECAL)
from RecoEcal.Configuration.RecoEcal_cff import *
from Calibration.EcalCalibAlgos.electronRecalibSCAssociator_cfi import *
ecalClusteringSeq = cms.Sequence(ecalClusters * electronRecalibSCAssociator)
#sandboxRerecoSeq = cms.Sequence(electronRecoSeq * ecalClusteringSeq)
#sandboxPFRerecoSeq = cms.Sequence(electronRecoSeq * rerecoPFClusteringSeq * ecalClusteringSeq)
rerecoECALSeq = cms.Sequence(recoECALSeq * rerecoPFClusteringSeq * ecalClusteringSeq)
############################################### FINAL SEQUENCES
# sequences used in AlCaRecoStreams_cff.py
#redo the preselection of electrons with selectorProducerSeq for recHit reducers: they use the selected objects as input
seqALCARECOEcalRecalElectron = cms.Sequence(ALCARECOEcalRecalElectronHLT * rerecoECALSeq * selectorProducerSeq * ALCARECOEcalCalElectronECALSeq)
|
py | b40a26fe75b391d5f0756b10e81f08910b14b33f | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="decision_boundary",
version="0.0.2",
author="Paolo Perrotta",
author_email="[email protected]",
description="A minimalist Python package to draw decision boundaries in machine learning.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nusco/decision-boundary",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
py | b40a270b4d026e2c4aa3e09489a887850668acef | #!/usr/bin/env python3
import singer
import tap_framework
import json
import sys
from tap_rebound.client import ReboundClient
from tap_rebound.streams import AVAILABLE_STREAMS
from tap_framework.streams import is_selected
LOGGER = singer.get_logger() # noqa
class ReboundRunner(tap_framework.Runner):
def get_streams_to_replicate(self):
streams = []
if not self.catalog:
return streams
stream_map = {s.stream: s for s in self.catalog.streams}
for stream_catalog in self.catalog.streams:
if not is_selected(stream_catalog):
LOGGER.info("'{}' is not marked selected, skipping."
.format(stream_catalog.stream))
continue
for available_stream in self.available_streams:
if available_stream.matches_catalog(stream_catalog):
if not available_stream.requirements_met(self.catalog):
raise RuntimeError(
"{} requires that that the following are "
"selected: {}"
.format(stream_catalog.stream,
','.join(available_stream.REQUIRES)))
to_add = available_stream(
self.config, self.state, stream_catalog, self.client, stream_map)
streams.append(to_add)
return streams
def do_discover(self):
LOGGER.info("Starting discovery.")
catalog = []
for available_stream in self.available_streams:
stream = available_stream(self.config, self.state, None, None, {})
catalog += stream.generate_catalog()
json.dump({'streams': catalog}, sys.stdout, indent=4)
@singer.utils.handle_top_exception(LOGGER)
def main():
args = singer.utils.parse_args(required_config_keys=['token', 'username'])
client = ReboundClient(args.config)
runner = ReboundRunner(
args, client, AVAILABLE_STREAMS)
if args.discover:
runner.do_discover()
else:
runner.do_sync()
if __name__ == '__main__':
main()
|
py | b40a27176e53f8afeac4fd273e2f2e466dd84f4c | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class DescribeCastersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'DescribeCasters','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_PageNum(self):
return self.get_query_params().get('PageNum')
def set_PageNum(self,PageNum):
self.add_query_param('PageNum',PageNum)
def get_CasterName(self):
return self.get_query_params().get('CasterName')
def set_CasterName(self,CasterName):
self.add_query_param('CasterName',CasterName)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status) |
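A hedged sketch of how a generated request like this is normally dispatched through the core SDK client; the credentials and region below are placeholders, and the import path assumes the standard aliyun-python-sdk package layout.

from aliyunsdkcore.client import AcsClient
from aliyunsdklive.request.v20161101.DescribeCastersRequest import DescribeCastersRequest

# Placeholder credentials and region; substitute your own configuration.
client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-shanghai")

request = DescribeCastersRequest()
request.set_PageNum(1)
request.set_PageSize(10)

# do_action_with_exception sends the RPC call and returns the raw JSON response body.
response = client.do_action_with_exception(request)
print(response)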
py | b40a2860dff1ca2588cae6599dd883c877a15812 | from django.shortcuts import render
from listing.models import Listings
from realtor.models import Realtors
from listing.choices import bedroom_choices, price_choices, state_choices
def index(request):
title = 'Real Estate | Welcome'
template = 'main/index.html'
# Order & Filter the context for the listing page.
listings = Listings.objects.all().order_by(
'-list_date').filter(is_published=True)[:3]
context = {
'title': title,
'listings': listings,
'state_choices': state_choices,
'bedroom_choices': bedroom_choices,
'price_choices': price_choices,
}
return render(request, template, context)
def about(request):
title = 'Real Estate | About'
template = 'main/about.html'
mvp = Realtors.objects.all().filter(is_mvp=True)
realtors = Realtors.objects.all().order_by('-hire_date')
context = {
'title': title,
'mvp': mvp,
'realtors': realtors,
}
return render(request, template, context)
|
py | b40a28f491aa0e596ae6981ee7c0bbf5e990003a | import re
#1
def check_web_address(text):
    # Allow word characters, dots, dashes and plus signs, then require a dot
    # followed by an alphabetic top-level domain.
    pattern = r"^[\w.\-+]+\.[A-Za-z]+$"
    result = re.search(pattern, text)
    return result is not None

print(check_web_address("gmail.com")) # True
print(check_web_address("www@google")) # False
print(check_web_address("www.Coursera.org")) # True
print(check_web_address("web-address.com/homepage")) # False
print(check_web_address("My_Favorite-Blog.US")) # True

#2
def check_time(text):
    # Hours 1-12, minutes 00-59, optional space, then am/pm in any case.
    pattern = r"^(1[0-2]|[1-9]):[0-5][0-9] ?[AaPp][Mm]$"
    result = re.search(pattern, text)
    return result is not None

print(check_time("12:45pm")) # True
print(check_time("9:59 AM")) # True
print(check_time("6:60am")) # False
print(check_time("five o'clock")) # False

#3
def contains_acronym(text):
    # At least two alphanumeric characters wrapped in parentheses.
    pattern = r"\([A-Za-z0-9]{2,}\)"
    result = re.search(pattern, text)
    return result is not None

print(contains_acronym("Instant messaging (IM) is a set of communication technologies used for text-based communication")) # True
print(contains_acronym("American Standard Code for Information Interchange (ASCII) is a character encoding standard for electronic communication")) # True
print(contains_acronym("Please do NOT enter without permission!")) # False
print(contains_acronym("PostScript is a fourth-generation programming language (4GL)")) # True
print(contains_acronym("Have fun using a self-contained underwater breathing apparatus (Scuba)!")) # True

#5
def check_zip_code(text):
    # A 5-digit zip preceded by whitespace, with an optional -#### extension.
    result = re.search(r"\s\d{5}(-\d{4})?", text)
    return result is not None

print(check_zip_code("The zip codes for New York are 10001 thru 11104.")) # True
print(check_zip_code("90210 is a TV show")) # False
print(check_zip_code("Their address is: 123 Main Street, Anytown, AZ 85258-0001.")) # True
print(check_zip_code("The Parliament of Canada is at 111 Wellington St, Ottawa, ON K1A0A9.")) # False
|
py | b40a2ade8f72af973e9b85086921e14856bc2840 | """Views for icon system"""
from typing import Any, Dict, List, Tuple
from django.http.request import HttpRequest
from django.http.response import HttpResponse, HttpResponseBadRequest
from django.views.generic import DetailView, ListView
from django.contrib.auth.decorators import permission_required
from django.views.decorators.http import require_http_methods
from django.contrib import messages
from django.urls import reverse
from . import models
from .view_types import Folder, FolderIcon, LinkIcon, Icon
@require_http_methods(["POST"])
@permission_required("icons.change_page")
def set_page_positions(request: HttpRequest):
"""Update the page positions of icons"""
sorted_items = request.POST.getlist('sort[]')
for i, item_key in enumerate(sorted_items):
try:
item_type, item_id = item_key.split("-")
item_id = int(item_id)
if item_type == "folder":
models.PageFolder.objects.filter(pk=item_id).update(position=(i+1)*10)
elif item_type == "icon":
models.PageIcon.objects.filter(pk=item_id).update(position=(i+1)*10)
else:
return HttpResponseBadRequest()
except ValueError:
return HttpResponseBadRequest()
messages.info(request, "Page icon positions saved successfully")
return HttpResponse()
class PageDetail(DetailView):
"""Icon page embedded in site nav"""
model = models.Page
def get_queryset(self):
qs = super().get_queryset()
qs = qs.prefetch_related('page_folders',
'page_folders__folder',
'page_folders__folder__folder_icons',
'page_folders__folder__folder_icons__icon',
'page_icons',
'page_icons__icon',
'crosslinks',
'crosslinks__crosslink',)
return qs
def get_context_data(self, **kwargs: Dict[str, Any]) -> Dict[str, Any]:
context = super().get_context_data(**kwargs)
page = self.object
assert isinstance(page, models.Page)
context['icons'] = self.get_icons()
context['folders'] = self.get_folder_modals()
crosslink_left, crosslink_right = self.get_crosslinks()
context['crosslinks_left'] = crosslink_left
context['crosslinks_right'] = crosslink_right
context['sort_url'] = reverse('icons:set-sort-positions')
return context
def get_folder_modals(self) -> List[Folder]:
"""Get the folder modals for template render"""
out = []
for page_folder in self.object.page_folders.all():
assert isinstance(page_folder, models.PageFolder)
db_folder = page_folder.folder
assert isinstance(db_folder, models.Folder)
icons = []
for folder_icon in db_folder.folder_icons.all():
assert isinstance(folder_icon, models.FolderIcon)
icon = folder_icon.icon
assert isinstance(icon, models.Icon)
icons.append(LinkIcon(position=folder_icon.position, title=icon.title,
icon=icon.icon, url=icon.url, page_icon_id=None))
out.append(Folder(folder_id=db_folder.pk, title=db_folder.title, icons=icons))
return out
def get_icons(self) -> List[Icon]:
"""Get the icons for template display"""
out = []
for page_icon in self.object.page_icons.all():
assert isinstance(page_icon, models.PageIcon)
icon = page_icon.icon
assert isinstance(icon, models.Icon)
out.append(LinkIcon(position=page_icon.position,
title=icon.title,
icon=icon.icon,
url=icon.url,
page_icon_id=page_icon.pk))
for page_folder in self.object.page_folders.all():
assert isinstance(page_folder, models.PageFolder)
folder = page_folder.folder
assert isinstance(folder, models.Folder)
out.append(FolderIcon(position=page_folder.position,
folder_id=folder.id,
title=folder.title,
icon=folder.icon,
page_folder_id=page_folder.id))
out.sort(key=lambda obj: obj.position)
return out
def get_crosslinks(self) -> Tuple[List[models.Page], List[models.Page]]:
"""Return the left and right crosslinks"""
left = []
right = []
for crosslink in self.object.crosslinks.all():
assert isinstance(crosslink, models.CrossLink)
if crosslink.side == "LEFT":
left.append(crosslink.crosslink)
if crosslink.side == "RIGHT":
right.append(crosslink.crosslink)
return (left, right)
class PageList(ListView):
"""List of detail pages"""
model = models.Page
|
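For reference, a small sketch of the request body that set_page_positions above expects, written with Django's test client; it assumes the 'icons' URL namespace is wired up and that a user holding the icons.change_page permission exists in the test fixtures (otherwise the permission_required decorator redirects to the login page).

from django.test import Client
from django.urls import reverse

client = Client()
# client.force_login(staff_user)  # a user with the icons.change_page permission is assumed

# Each entry is "<type>-<pk>"; the view assigns positions 10, 20, 30, ... in list order.
payload = {"sort[]": ["icon-3", "folder-1", "icon-7"]}
response = client.post(reverse("icons:set-sort-positions"), payload)
print(response.status_code)  # 200 once authenticated with the right permission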
py | b40a2b0436b4f42a334357c1c710cd2d4cb38a2e | """
"""
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from .mass_profile import cumulative_mass_PDF
from ....halo_boundary_functions import halo_mass_to_halo_radius
from ......model_helpers import custom_spline
from ......model_defaults import halo_mass_definition as default_halo_mass_definition
from .......sim_manager.sim_defaults import default_cosmology, default_redshift
from .......custom_exceptions import HalotoolsError
__all__ = ('mc_generate_nfw_radial_positions', )
def mc_generate_nfw_radial_positions(num_pts=int(1e4), conc=5,
cosmology=default_cosmology, redshift=default_redshift,
mdef=default_halo_mass_definition, seed=None,
**kwargs):
r""" Return a Monte Carlo realization of points in an NFW profile.
See :ref:`monte_carlo_nfw_spatial_profile` for a discussion of this technique.
Parameters
-----------
num_pts : int, optional
Number of points in the Monte Carlo realization of the profile.
Default is 1e4.
conc : float, optional
Concentration of the NFW profile being realized.
Default is 5.
halo_mass : float, optional
Total mass of the halo whose profile is being realized.
If ``halo_mass`` is unspecified,
keyword argument ``halo_radius`` must be specified.
halo_radius : float, optional
Physical boundary of the halo whose profile is being realized
in units of Mpc/h.
If ``halo_radius`` is unspecified,
keyword argument ``halo_mass`` must be specified, in which case the
outer boundary of the halo will be determined
according to the selected mass definition
cosmology : object, optional
Instance of an Astropy `~astropy.cosmology` object.
Default is set in `~halotools.sim_manager.sim_defaults`
redshift: array_like, optional
Can either be a scalar, or a numpy array of the same dimension as the input ``halo_mass``.
Default is set in `~halotools.sim_manager.sim_defaults`
mdef: str
String specifying the halo mass definition, e.g., 'vir' or '200m'.
Default is set in `~halotools.empirical_models.model_defaults`
seed : int, optional
Random number seed used in the Monte Carlo realization.
Default is None, which will produce stochastic results.
Returns
--------
radial_positions : array_like
Numpy array storing a Monte Carlo realization of the halo profile.
All values will lie strictly between 0 and the halo boundary.
Examples
---------
>>> radial_positions = mc_generate_nfw_radial_positions(halo_mass = 1e12, conc = 10)
>>> radial_positions = mc_generate_nfw_radial_positions(halo_radius = 0.25)
"""
try:
halo_radius = kwargs['halo_radius']
except KeyError:
try:
halo_mass = kwargs['halo_mass']
halo_radius = halo_mass_to_halo_radius(halo_mass, cosmology, redshift, mdef)
except KeyError:
msg = ("\nIf keyword argument ``halo_radius`` is unspecified, "
"argument ``halo_mass`` must be specified.\n")
raise HalotoolsError(msg)
except TypeError:
raise HalotoolsError("Input ``halo_mass`` must be a float")
halo_radius = np.atleast_1d(halo_radius).astype(np.float64)
try:
assert len(halo_radius) == 1
except AssertionError:
msg = ("Input ``halo_radius`` must be a float")
raise HalotoolsError(msg)
conc = np.atleast_1d(conc).astype(np.float64)
try:
assert len(conc) == 1
except AssertionError:
msg = ("Input ``conc`` must be a float")
raise HalotoolsError(msg)
# Build lookup table from which to tabulate the inverse cumulative_mass_PDF
Npts_radius_table = int(1e3)
radius_array = np.logspace(-4, 0, Npts_radius_table)
logradius_array = np.log10(radius_array)
table_ordinates = cumulative_mass_PDF(radius_array, conc)
log_table_ordinates = np.log10(table_ordinates)
funcobj = custom_spline(log_table_ordinates, logradius_array, k=3)
# Use method of Inverse Transform Sampling to generate a Monte Carlo realization
# of the radial positions
with NumpyRNGContext(seed):
randoms = np.random.uniform(0, 1, num_pts)
log_randoms = np.log10(randoms)
log_scaled_radial_positions = funcobj(log_randoms)
scaled_radial_positions = 10.**log_scaled_radial_positions
radial_positions = scaled_radial_positions*halo_radius
return radial_positions
|
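The comments above name the method of Inverse Transform Sampling; here is a minimal, self-contained illustration of the same idea on a distribution with an analytic inverse CDF (an exponential with rate lam), independent of the NFW machinery.

import numpy as np

def sample_exponential(num_pts, lam=1.0, seed=None):
    # Inverse transform sampling: draw u ~ Uniform(0, 1) and push it through the
    # inverse CDF F^{-1}(u) = -ln(1 - u) / lam of the target distribution.
    rng = np.random.default_rng(seed)
    u = rng.uniform(0.0, 1.0, num_pts)
    return -np.log(1.0 - u) / lam

samples = sample_exponential(10000, lam=2.0, seed=42)
print(samples.mean())  # close to 1 / lam = 0.5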
py | b40a2b43a0a58ccf4151ef59c874663caaf160cd | # -*- coding: utf-8 -*-
"""
File Name: coinChange
Author : jing
Date: 2020/3/27
https://leetcode-cn.com/problems/coin-change/
"""
import math
class Solution:
# Dynamic programming: dp[i] holds the fewest coins needed to make amount i
def coinChange(self, coins, amount):
if amount == 0:
return 0
dp = list()
temp = amount + 1
for i in range(amount + 1):
if i not in coins:
dp.append(temp)
else:
dp.append(1)
for i in range(amount + 1):
if i not in coins:
for j in coins:
if i - j > 0:
dp[i] = min(dp[i - j] + 1, dp[i])
return dp[amount] if dp[amount] != temp else -1
# Similar idea done as BFS, subtracting each coin from the target value; this version timed out on LeetCode
def coinChange2(self, coins, amount: int) -> int:
if coins is None or len(coins) == 0 or amount < 1:
return 0
from collections import deque
queue = deque([(0, 0)])
visited = set([0])
while queue:
cur, step = queue.popleft()
if cur == amount:
return step
if cur > amount:
continue
for coin in coins:
value = cur + coin
if value not in visited:
visited.add(value)
queue.append((value, step + 1))
return -1
if __name__ == '__main__':
print(Solution().coinChange2([1, 2, 5], amount=100))
|
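A quick check of the recurrence used above in its standard bottom-up form, dp[i] = min(dp[i - c] + 1) over coins c; this is a textbook restatement for verification, not the author's exact implementation. For coins [1, 2, 5] and amount 11 the answer is 3 (5 + 5 + 1).

def coin_change_bottom_up(coins, amount):
    INF = amount + 1                   # sentinel larger than any feasible answer
    dp = [0] + [INF] * amount          # dp[0] = 0 coins to make amount 0
    for i in range(1, amount + 1):
        for c in coins:
            if c <= i:
                dp[i] = min(dp[i], dp[i - c] + 1)
    return dp[amount] if dp[amount] != INF else -1

print(coin_change_bottom_up([1, 2, 5], 11))  # 3 -> 5 + 5 + 1
print(coin_change_bottom_up([2], 3))         # -1, amount cannot be formed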
py | b40a2b72c8af521b6cfa0e5cdd31dc9ad3ebd451 | # -*- coding: utf-8 -*-
from payplug.resources import OneyPaymentSimulation
from payplug.test import TestBase
class TestOneyPaymentSimulationResource(TestBase):
def test_initializer_oney_payment_simulation(self):
simulation_attributes = {
"x3_with_fees": {
"down_payment_amount": 67667,
"nominal_annual_percentage_rate": 6.04,
"effective_annual_percentage_rate": 6.21,
"installments": [
{
"date": "2019-12-29T01:00:00.000Z",
"amount": 66667
},
{
"date": "2020-01-29T01:00:00.000Z",
"amount": 66666
}
],
"total_cost": 1000
},
}
simulation_object = OneyPaymentSimulation(**simulation_attributes)
operation = simulation_object.x3_with_fees
assert isinstance(operation, OneyPaymentSimulation.Operation)
assert operation.down_payment_amount == 67667
assert operation.nominal_annual_percentage_rate == 6.04
assert operation.effective_annual_percentage_rate == 6.21
assert operation.installments == [
{
"date": "2019-12-29T01:00:00.000Z",
"amount": 66667
},
{
"date": "2020-01-29T01:00:00.000Z",
"amount": 66666
}
]
assert operation.total_cost == 1000
|
py | b40a2c1a2ab96737c38cb863a3afa34fcfd6066a | """
This is a modified version of source code from the Accerciser project
(http://live.gnome.org/accerciser).
Backend to the console plugin.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
"""
import re
import sys
import os
import subprocess
from io import StringIO
import tkinter
import IPython
from functools import reduce
#Works by itself, but not able to import it into the GUI at this time.
class IterableIPShell:
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
if input_func:
IPython.iplib.raw_input_original = input_func
if cin:
IPython.Shell.Term.cin = cin
if cout:
IPython.Shell.Term.cout = cout
if cerr:
IPython.Shell.Term.cerr = cerr
if argv is None:
argv=[]
# This is to get rid of the blockage that occurs during
# IPython.Shell.InteractiveShell.user_setup()
IPython.iplib.raw_input = lambda x: None
self.term = IPython.genutils.IOTerm(cin=cin, cout=cout, cerr=cerr)
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
self.IP = IPython.Shell.make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
embedded=True,
shell_class=IPython.Shell.InteractiveShell)
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ',
verbose=self.IP.rc.system_verbose)
sys.excepthook = excepthook
self.iter_more = 0
self.history_level = 0
self.complete_sep = re.compile('[\s\{\}\[\]\(\)]')
def execute(self):
self.history_level = 0
orig_stdout = sys.stdout
sys.stdout = IPython.Shell.Term.cout
try:
line = self.IP.raw_input(None, self.iter_more)
if self.IP.autoindent:
self.IP.readline_startup_hook(None)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.resetbuffer()
# keep cache in sync with the prompt counter:
self.IP.outputcache.prompt_count -= 1
if self.IP.autoindent:
self.IP.indent_current_nsp = 0
self.iter_more = 0
except:
self.IP.showtraceback()
else:
self.iter_more = self.IP.push(line)
if (self.IP.SyntaxTB.last_syntax_error and
self.IP.rc.autoedit_syntax):
self.IP.edit_syntax_error()
if self.iter_more:
self.prompt = str(self.IP.outputcache.prompt2).strip()
if self.IP.autoindent:
self.IP.readline_startup_hook(self.IP.pre_readline)
else:
self.prompt = str(self.IP.outputcache.prompt1).strip()
sys.stdout = orig_stdout
def historyBack(self):
self.history_level -= 1
return self._getHistory()
def historyForward(self):
self.history_level += 1
return self._getHistory()
def _getHistory(self):
try:
rv = self.IP.user_ns['In'][self.history_level].strip('\n')
except IndexError:
self.history_level = 0
rv = ''
return rv
def updateNamespace(self, ns_dict):
self.IP.user_ns.update(ns_dict)
def complete(self, line):
split_line = self.complete_sep.split(line)
possibilities = self.IP.complete(split_line[-1])
if possibilities:
common_prefix = reduce(self._commonPrefix, possibilities)
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
return completed, possibilities
def _commonPrefix(self, str1, str2):
for i in range(len(str1)):
if not str2.startswith(str1[:i+1]):
return str1[:i]
return str1
def shell(self, cmd, verbose=0, debug=0, header=''):
    if verbose or debug:
        print(header + cmd)
    # os.popen4 was removed in Python 3; subprocess.getoutput captures both
    # stdout and stderr of the command, which is close to the old behaviour.
    if not debug:
        print(subprocess.getoutput(cmd))
ansi_colors = {'0;30': 'Black',
'0;31': 'Red',
'0;32': 'Green',
'0;33': 'Brown',
'0;34': 'Blue',
'0;35': 'Purple',
'0;36': 'Cyan',
'0;37': 'LightGray',
'1;30': 'DarkGray',
'1;31': 'DarkRed',
'1;32': 'SeaGreen',
'1;33': 'Yellow',
'1;34': 'LightBlue',
'1;35': 'MediumPurple',
'1;36': 'LightCyan',
'1;37': 'White'}
class TkConsoleView(tkinter.Text):
def __init__(self,root):
tkinter.Text.__init__(self,root)
# As the stdout,stderr etc. get fiddled about with we need to put any
# debug output into a file
self.debug=0
if self.debug:
self.o = open('debug.out','w')
# Keeps track of where the insert cursor should be on the entry line
self.mark = 'scroll_mark'
self.mark_set(self.mark,tkinter.END)
self.mark_gravity(self.mark,tkinter.RIGHT)
# Set the tags for colouring the text
for code in ansi_colors:
self.tag_config(code,
foreground=ansi_colors[code])
self.tag_config('notouch') # Tag for indicating what areas of the widget aren't editable
# colour_pat matches the colour tags and places these in a group
# match character with hex value 01 (start of heading?) zero or more times, followed by
# the hex character 1b (escape) then "[" and group ...things.. followed by m (?) and then
# hex character 02 (start of text) zero or more times
self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
self.line_start = 'line_start' # Tracks start of user input on the line (excluding prompt)
self.mark_set(self.line_start,tkinter.INSERT)
self.mark_gravity(self.line_start,tkinter.LEFT)
self._setBindings()
def write(self, text, editable=False):
segments = self.color_pat.split(text)
# First is blank line
segment = segments.pop(0)
# Keep track of where we started entering text so we can set as non-editable
self.start_mark = 'start_mark'
self.mark_set(self.start_mark,tkinter.INSERT)
self.mark_gravity(self.start_mark,tkinter.LEFT)
self.insert(tkinter.END, segment)
if segments:
# Just return the colour tags
ansi_tags = self.color_pat.findall(text)
for tag in ansi_tags:
i = segments.index(tag)
self.insert(tkinter.END,segments[i+1],tag)
segments.pop(i)
if not editable:
if self.debug:
print("adding notouch between %s : %s" % ( self.index(self.start_mark),\
self.index(tkinter.INSERT) ))
self.tag_add('notouch',self.start_mark,"%s-1c" % tkinter.INSERT)
self.mark_unset(self.start_mark)
#jmht self.scroll_mark_onscreen(self.mark)
def showBanner(self,banner):
"""Print the supplied banner on starting the shell"""
self.write(banner)
def showPrompt(self, prompt):
self.write(prompt)
self.mark_set(self.line_start,tkinter.INSERT)
self.see(tkinter.INSERT) #Make sure we can always see the prompt
def changeLine(self, text):
self.delete(self.line_start,"%s lineend" % self.line_start)
self.write(text, True)
def getCurrentLine(self):
rv = self.get(self.line_start,tkinter.END)
if self.debug:
print("getCurrentline: %s" % rv, file=self.o)
print("INSERT: %s" % tkinter.END, file=self.o)
print("END: %s" % tkinter.INSERT, file=self.o)
print("line_start: %s" % self.index(self.line_start), file=self.o)
return rv
def showReturned(self, text):
self.tag_add('notouch',self.line_start,"%s lineend" % self.line_start )
self.write('\n'+text)
if text:
self.write('\n')
self.showPrompt(self.prompt)
#self.mark_set(self.line_start,Tkinter.END) #jmht don't need this as showprompt sets mark
def _setBindings(self):
""" Bind the keys we require.
REM: if a bound function returns "break" then no other bindings are called
If it returns None, then the other default bindings are called.
"""
self.bind("<Key>",self.processKeyPress)
self.bind("<Return>",self.processEnterPress)
self.bind("<Up>",self.processUpPress)
self.bind("<Down>",self.processDownPress)
self.bind("<Tab>",self.processTabPress)
self.bind("<BackSpace>",self.processBackSpacePress)
def isEditable(self):
""" Scan the notouch tag range in pairs and see if the INSERT index falls
between any of them.
"""
ranges = self.tag_ranges('notouch')
first=None
for idx in ranges:
if not first:
first=idx
continue
else:
if self.debug:
print("Comparing %s between %s : %s " % (self.index(tkinter.INSERT),first,idx))
if self.compare( tkinter.INSERT,'>=',first ) and \
self.compare( tkinter.INSERT,'<=',idx ):
return False
first=None
return True
def processKeyPress(self,event):
if self.debug:
print("processKeyPress got key: %s" % event.char, file=self.o)
print("processKeyPress INSERT: %s" % self.index(tkinter.INSERT), file=self.o)
print("processKeyPress END: %s" % self.index(tkinter.END), file=self.o)
if not self.isEditable():
# Move cursor mark to start of line
self.mark_set(tkinter.INSERT,self.mark)
# Make sure line_start follows inserted text
self.mark_set(self.mark,"%s+1c" % tkinter.INSERT)
def processBackSpacePress(self,event):
if not self.isEditable():
return "break"
def processEnterPress(self,event):
self._processLine()
return "break" # Need break to stop the other bindings being called
def processUpPress(self,event):
self.changeLine(self.historyBack())
return "break"
def processDownPress(self,event):
self.changeLine(self.historyForward())
return "break"
def processTabPress(self,event):
if not self.getCurrentLine().strip():
return
completed, possibilities = self.complete(self.getCurrentLine())
if len(possibilities) > 1:
slice = self.getCurrentLine()
self.write('\n')
for symbol in possibilities:
self.write(symbol+'\n')
self.showPrompt(self.prompt)
self.changeLine(completed or slice)
return "break"
class IPythonView(TkConsoleView, IterableIPShell):
def __init__(self,root,banner=None):
TkConsoleView.__init__(self,root)
self.cout = StringIO()
IterableIPShell.__init__(self, cout=self.cout,cerr=self.cout,
input_func=self.raw_input)
if banner:
self.showBanner(banner)
self.execute()
self.cout.truncate(0)
self.showPrompt(self.prompt)
self.interrupt = False
def raw_input(self, prompt=''):
if self.interrupt:
self.interrupt = False
raise KeyboardInterrupt
return self.getCurrentLine()
def _processLine(self):
self.history_pos = 0
self.execute()
rv = self.cout.getvalue()
if self.debug:
print("_processLine got rv: %s" % rv, file=self.o)
if rv: rv = rv.strip('\n')
self.showReturned(rv)
self.cout.truncate(0)
if __name__ == "__main__":
root = tkinter.Tk()
s=IPythonView(root)
s.pack()
root.mainloop()
|
py | b40a2c49e52ecf1cae280291e7ea7f774c577525 | import _plotly_utils.basevalidators
class TexttemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="texttemplate", parent_name="treemap", **kwargs):
super(TexttemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
py | b40a2c620646b0a0966a9d1eff098c94299ec3ba | import os
import sys
import time
import traceback
import psutil
import math
from threading import Thread, Event
from flask import jsonify
from flask_cors import CORS
import numpy as np
from collections import Counter
from carbontracker import constants
from carbontracker import loggerutil
from carbontracker import predictor
from carbontracker import exceptions
from carbontracker.components import component
import cpuinfo
from flask import Flask
import geocoder
class CarbonTrackerThread(Thread):
"""Thread to fetch consumptions"""
def __init__(self,
components,
logger,
ignore_errors,
delete,
flask_server,
update_interval=10):
super(CarbonTrackerThread, self).__init__()
self.name = "CarbonTrackerThread"
self.delete = delete
self.components = components
self.update_interval = update_interval
self.ignore_errors = ignore_errors
self.logger = logger
self.flask_server = flask_server
self.epoch_times = []
self.running = True
self.measuring = False
self.epoch_counter = 0
self.daemon = True
self.g = geocoder.ip('me')
self.energy_stats = {
"component_energy": {},
"state": self.g.state,
"component_names": {
"cpu": {},
"gpu": {}
}
}
self.start()
def run(self):
"""Thread's activity."""
try:
self.begin()
while self.running:
self.component_energy_per_epoch()
if not self.measuring:
continue
self._collect_measurements()
time.sleep(self.update_interval)
# Shutdown in thread's activity instead of epoch_end() to ensure
# that we only shutdown after last measurement.
self._components_shutdown()
except Exception as e:
self._handle_error(e)
def begin(self):
self._components_remove_unavailable()
self._components_init()
self._log_components_info()
cpu_name = cpuinfo.get_cpu_info()['brand_raw']
for comp in self.components:
if comp.name == "cpu":
self.energy_stats["component_names"][comp.name][cpu_name] = len(comp.devices())
else:
    self.energy_stats["component_names"][comp.name] = Counter(comp.devices())
self.logger.info("Monitoring thread started.")
def stop(self):
if not self.running:
return
self.measuring = False
self.running = False
self.logger.info("Monitoring thread ended.")
self.logger.output("Finished monitoring.", verbose_level=1)
def epoch_start(self):
self.epoch_counter += 1
self.measuring = True
self.cur_epoch_time = time.time()
def epoch_end(self):
self.measuring = False
self.epoch_times.append(time.time() - self.cur_epoch_time)
self._log_epoch_measurements()
def _log_components_info(self):
log = ["The following components were found:"]
for comp in self.components:
name = comp.name.upper()
devices = ", ".join(comp.devices())
log.append(f"{name} with device(s) {devices}.")
log_str = " ".join(log)
self.logger.info(log_str)
self.logger.output(log_str, verbose_level=1)
def _log_epoch_measurements(self):
self.logger.info(f"Epoch {self.epoch_counter}:")
duration = self.epoch_times[-1]
self.logger.info(
f"Duration: {loggerutil.convert_to_timestring(duration, True)}")
for comp in self.components:
if comp.power_usages and comp.power_usages[-1]:
power_avg = np.mean(comp.power_usages[-1], axis=0)
# If np.mean is calculated during a measurement, it will get an
# empty list and return nan, if this is the case we take the previous measurement.
# TODO: Use semaphores to wait for measurement to finish.
if np.isnan(power_avg).all():
power_avg = np.mean(
comp.power_usages[-2],
axis=0) if len(comp.power_usages) >= 2 else None
else:
self.logger.err_warn(
"Epoch duration is too short for a measurement to be "
"collected.")
power_avg = None
self.logger.info(
f"Average power usage (W) for {comp.name}: {power_avg}")
def _components_remove_unavailable(self):
self.components = [cmp for cmp in self.components if cmp.available()]
if not self.components:
raise exceptions.NoComponentsAvailableError()
def _components_init(self):
for comp in self.components:
comp.init()
def _components_shutdown(self):
for comp in self.components:
comp.shutdown()
def _collect_measurements(self):
"""Collect one round of measurements."""
for comp in self.components:
comp.collect_power_usage(self.epoch_counter)
def total_energy_per_epoch(self):
"""Retrieves total energy (kWh) per epoch used by all components"""
total_energy = np.zeros(len(self.epoch_times))
for comp in self.components:
energy_usage = comp.energy_usage(self.epoch_times)
total_energy += energy_usage
return total_energy
def component_energy_per_epoch(self):
for comp in self.components:
self.energy_stats["component_energy"][comp.name] = comp.energy_usage(self.epoch_times, expanded_interval=self.update_interval)
self.flask_server.energy_stats = self.energy_stats
return self.energy_stats
def _handle_error(self, error):
err_str = traceback.format_exc()
if self.ignore_errors:
err_str = (f"Ignored error: {err_str}Continued training without "
"monitoring...")
self.logger.err_critical(err_str)
self.logger.output(err_str)
if self.ignore_errors:
# Stop monitoring but continue training.
self.delete()
else:
os._exit(70)
class FlaskServerThread(Thread):
def __init__(self):
super(FlaskServerThread, self).__init__()
self.training_paused = False
self.energy_stats = None
self.start()
def run(self):
app = Flask(__name__)
CORS(app)
@app.route("/")
def main():
return "EnergyVis backend is tracking..."
@app.route("/pause")
def pause():
self.training_paused = not self.training_paused
return {"training_paused": self.training_paused}
@app.route("/energy-stats")
def get_energy_statistics():
return self.energy_stats["component_energy"]
@app.route("/initial-setup")
def get_initial_setup():
return {
"component_names": self.energy_stats["component_names"],
"state": self.energy_stats["state"]
}
app.run()
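# Intended call pattern for the tracker defined below (a sketch, not executed here):
# the training loop wraps each epoch in epoch_start()/epoch_end(), and stop()
# covers early exits such as early stopping.
#
#   tracker = CarbonTracker(epochs=10, monitor_epochs=1, verbose=1)
#   for epoch in range(10):
#       tracker.epoch_start()
#       ...          # one epoch of training
#       tracker.epoch_end()
#   tracker.stop()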
class CarbonTracker:
def __init__(self,
epochs,
epochs_before_pred=1,
monitor_epochs=1,
update_interval=10,
stop_and_confirm=False,
ignore_errors=False,
components="all",
devices_by_pid=False,
log_dir=None,
verbose=1,
decimal_precision=6):
self.epochs = epochs
self.epochs_before_pred = (epochs if epochs_before_pred < 0 else
epochs_before_pred)
self.monitor_epochs = (epochs
if monitor_epochs < 0 else monitor_epochs)
if (self.monitor_epochs == 0
or self.monitor_epochs < self.epochs_before_pred):
raise ValueError(
"Argument monitor_epochs expected a value in "
f"{{-1, >0, >=epochs_before_pred}}, got {monitor_epochs}.")
self.stop_and_confirm = stop_and_confirm
self.ignore_errors = ignore_errors
self.epoch_counter = 0
self.decimal_precision = decimal_precision
self.deleted = False
self.flask_server = FlaskServerThread()
try:
pids = self._get_pids()
self.logger = loggerutil.Logger(log_dir=log_dir, verbose=verbose)
self.tracker = CarbonTrackerThread(
delete=self._delete,
components=component.create_components(
components=components,
pids=pids,
devices_by_pid=devices_by_pid),
logger=self.logger,
ignore_errors=ignore_errors,
flask_server=self.flask_server,
update_interval=update_interval,
)
except Exception as e:
self._handle_error(e)
def epoch_start(self):
if self.deleted:
return
try:
while self.flask_server.training_paused:
time.sleep(2)
self.tracker.epoch_start()
self.epoch_counter += 1
except Exception as e:
self._handle_error(e)
def epoch_end(self):
if self.deleted:
return
try:
self.tracker.epoch_end()
if self.epoch_counter == self.monitor_epochs:
self._output_actual()
if self.epoch_counter == self.epochs_before_pred:
self._output_pred()
if self.stop_and_confirm:
self._user_query()
if self.epoch_counter == self.monitor_epochs:
self._delete()
except Exception as e:
self._handle_error(e)
def stop(self):
"""Ensure that tracker is stopped and deleted. E.g. use with early
stopping, where not all monitor_epochs have been run."""
if self.deleted:
return
self.logger.info(
f"Training was interrupted before all {self.monitor_epochs} epochs"
" were monitored.")
        # Decrement epoch_counter by 1 since the measurement for the last
        # epoch was interrupted and is not accounted for.
self.epoch_counter -= 1
self._output_actual()
self._delete()
def _handle_error(self, error):
err_str = traceback.format_exc()
if self.ignore_errors:
err_str = (f"Ignored error: {err_str}Continued training without "
"monitoring...")
self.logger.err_critical(err_str)
self.logger.output(err_str)
if self.ignore_errors:
# Stop monitoring but continue training.
self._delete()
else:
sys.exit(70)
def _output_energy(self, description, time, energy):
precision = self.decimal_precision
output = (f"\n{description}\n"
f"\tTime:\t{loggerutil.convert_to_timestring(time)}\n"
f"\tEnergy:\t{energy:.{precision}f} kWh\n")
self.logger.output(output, verbose_level=1)
def _output_actual(self):
"""Output actual usage so far."""
energy_usages = self.tracker.total_energy_per_epoch()
energy = energy_usages.sum()
times = self.tracker.epoch_times
time = np.sum(times)
self._output_energy(
f"Actual consumption for {self.epoch_counter} epoch(s):", time,
energy)
def _output_pred(self):
"""Output predicted usage for full training epochs."""
epoch_energy_usages = self.tracker.total_energy_per_epoch()
epoch_times = self.tracker.epoch_times
pred_energy = predictor.predict_energy(self.epochs,
epoch_energy_usages)
pred_time = predictor.predict_time(self.epochs, epoch_times)
self._output_energy(
f"Predicted consumption for {self.epochs} epoch(s):", pred_time,
pred_energy)
def _user_query(self):
self.logger.output("Continue training (y/n)?")
user_input = input().lower()
self._check_input(user_input)
def _check_input(self, user_input):
if user_input == "y":
self.logger.output("Continuing...")
return
elif user_input == "n":
self.logger.info("Session ended by user.")
self.logger.output("Quitting...")
sys.exit(0)
else:
self.logger.output("Input not recognized. Try again (y/n):")
user_input = input().lower()
self._check_input(user_input)
def _delete(self):
self.tracker.stop()
del self.logger
del self.tracker
self.deleted = True
def _get_pids(self):
"""Get current process id and all children process ids."""
process = psutil.Process()
pids = [process.pid
] + [child.pid for child in process.children(recursive=True)]
return pids
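# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal sketch of wrapping a training loop with CarbonTracker, based only
# on the epoch_start/epoch_end/stop API defined above; `train_one_epoch` and
# the epoch count are placeholders.
#
#   tracker = CarbonTracker(epochs=10, monitor_epochs=1, stop_and_confirm=False)
#   for _ in range(10):
#       tracker.epoch_start()
#       train_one_epoch()      # user-supplied work
#       tracker.epoch_end()
#   tracker.stop()             # also safe when training stops early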
|
py | b40a2d9c44469c39123c16bf1c84d3dccba200d0 | # Generated by Django 3.2.8 on 2021-11-03 23:44
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StreamEventData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stream_slug', models.CharField(default='', max_length=39, null=True)),
('project_slug', models.CharField(default='', max_length=12, null=True)),
('device_slug', models.CharField(default='', max_length=33, null=True)),
('variable_slug', models.CharField(default='', max_length=18, null=True)),
('device_timestamp', models.BigIntegerField(blank=True, null=True)),
('timestamp', models.DateTimeField(blank=True, null=True)),
('streamer_local_id', models.PositiveIntegerField(default=0)),
('dirty_ts', models.BooleanField(default=False)),
('status', models.CharField(choices=[('unk', 'unknown'), ('cln', 'clean'), ('drt', 'dirty'), ('utc', 'utc timestamp')], default='unk', max_length=3)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('s3_key_path', models.CharField(blank=True, default='', max_length=20, null=True)),
('ext', models.CharField(blank=True, choices=[('json', 'Json Data File'), ('json.gz', 'GZipped Json Data File'), ('csv', 'CSV Data File')], default='json', max_length=10, null=True)),
('extra_data', models.JSONField(blank=True, null=True)),
('format_version', models.IntegerField(default=2)),
],
options={
'verbose_name': 'Stream Event Entry',
'verbose_name_plural': 'Stream Event Entries',
'ordering': ['stream_slug', 'streamer_local_id', 'timestamp'],
},
),
]
|
py | b40a2e98bf80c2cf4ddb30a24b5cd265415c2fed | from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from .one_hot import one_hot
# based on:
# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
class TverskyLoss(nn.Module):
r"""Criterion that computes Tversky Coeficient loss.
According to [1], we compute the Tversky Coefficient as follows:
.. math::
\text{S}(P, G, \alpha; \beta) =
        \frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
where:
- :math:`P` and :math:`G` are the predicted and ground truth binary
labels.
- :math:`\alpha` and :math:`\beta` control the magnitude of the
penalties for FPs and FNs, respectively.
Notes:
- :math:`\alpha = \beta = 0.5` => dice coeff
- :math:`\alpha = \beta = 1` => tanimoto coeff
- :math:`\alpha + \beta = 1` => F beta coeff
Shape:
- Input: :math:`(N, C, H, W)` where C = number of classes.
- Target: :math:`(N, H, W)` where each value is
:math:`0 ≤ targets[i] ≤ C−1`.
Examples:
>>> N = 5 # num_classes
>>> loss = tgm.losses.TverskyLoss(alpha=0.5, beta=0.5)
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = loss(input, target)
>>> output.backward()
References:
[1]: https://arxiv.org/abs/1706.05721
"""
def __init__(self, alpha: float, beta: float) -> None:
super(TverskyLoss, self).__init__()
self.alpha: float = alpha
self.beta: float = beta
self.eps: float = 1e-6
def forward( # type: ignore
self,
input: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(input):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(input)))
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxNxHxW. Got: {}"
.format(input.shape))
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError("input and target shapes must be the same. Got: {}"
.format(input.shape, input.shape))
if not input.device == target.device:
            raise ValueError(
                "input and target must be on the same device. Got: {} and {}".format(
                    input.device, target.device))
# compute softmax over the classes axis
input_soft = F.softmax(input, dim=1)
# create the labels one hot tensor
target_one_hot = one_hot(target, num_classes=input.shape[1],
device=input.device, dtype=input.dtype)
# compute the actual dice score
dims = (1, 2, 3)
intersection = torch.sum(input_soft * target_one_hot, dims)
fps = torch.sum(input_soft * (torch.tensor(1.) - target_one_hot), dims)
fns = torch.sum((torch.tensor(1.) - input_soft) * target_one_hot, dims)
numerator = intersection
denominator = intersection + self.alpha * fps + self.beta * fns
tversky_loss = numerator / (denominator + self.eps)
return torch.mean(torch.tensor(1.) - tversky_loss)
######################
# functional interface
######################
def tversky_loss(
input: torch.Tensor,
target: torch.Tensor,
alpha: float,
beta: float) -> torch.Tensor:
r"""Function that computes Tversky loss.
See :class:`~torchgeometry.losses.TverskyLoss` for details.
"""
return TverskyLoss(alpha, beta)(input, target)
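# --- Hypothetical usage sketch (not part of the original module) ---
# Mirrors the class-based example in the TverskyLoss docstring, but through the
# functional interface above; shapes follow the documented BxNxHxW / BxHxW
# convention and the tensors are random placeholders.
#
#   N = 5  # number of classes
#   logits = torch.randn(1, N, 3, 5, requires_grad=True)
#   labels = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
#   loss = tversky_loss(logits, labels, alpha=0.5, beta=0.5)
#   loss.backward()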
|
py | b40a2f804928a9a8e0271eb3eac438c02a005b69 | from robot.errors import PassExecution
from robot.libraries.BuiltIn import BuiltIn
def raise_pass_execution_exception(msg):
raise PassExecution(msg)
def call_pass_execution_method(msg):
BuiltIn().pass_execution(msg, 'lol')
|
py | b40a2f8fcf27655d489ee8a68e5be926d132c13b | from pathlib import Path
import pyglet
from .window import Window
from .view import View
from .scene import CarScene, KeypadScene
from .car import CarGroup, Car
from .palette import Palette
from .keyboard import Keyboard, QWERTY_LAYOUT, NUMPAD_LAYOUT, DVORAK_LAYOUT
from .keypad import Keypad
from .circuit import Circuit
from .tutorial import tutorial
from .anim import fork, Wait
from .title import TitleTop, TitleBottom
#from . import music
palette = Palette()
kbd = Keyboard()
window = Window(kbd)
ctx = window.ctx
# Couldn't get music to work. These tracks would work great:
# - menu: https://opengameart.org/content/arcade-racing-tune
# - game: https://opengameart.org/content/tactical-pursuit
#music.play(music.menu_music)
def global_key_event(action, is_pressed):
if action == 'fullscreen' and is_pressed:
if window.fullscreen:
window.fullscreen = False
else:
window.fullscreen = True
try:
conf = Path('keypad_racer.conf').read_text()
except FileNotFoundError:
kbd.attach_handler(global_key_event)
tutorial(ctx, palette, kbd, window)
else:
circuit = Circuit(ctx, 'okruh.png')
scene = TitleTop(ctx, window, kbd)
window.add_view(View(ctx, scene))
scene = TitleBottom(ctx, window, kbd, circuit, conf) # this has titlescreen logic :/
window.add_view(View(ctx, scene))
def run():
pyglet.clock.schedule_interval(id, 1/60)
pyglet.app.run()
|
py | b40a2fbe1c10b2e923417401a0ab118cfe366996 | print('''
-----------------------------------------------------------------------------------------------------------------
<=== Windows version ===>
*specification*
Supported for versions upto windows xp
-----------------------------------------------------------------------------------------------------------------
''')
# :NOTE:
# The Project is made by Prasoon rai. Don't sell it! It is under the (MIT) and (Apache 2.0) licence <-->
# The 'Snake game' is made by Tim.
# For any queries send an e-mail to ('[email protected]').
# <----------------------------- NOTE -------------------------------->
# This is developer pack. VERSION --> (Dev. 2.0 lite)
#
# ENJOY
# <------------------------------------------------------------------->
print('''
WARNING ===>
<==== NOTE ====>
The Project is made by Prasoon rai. Don't sell it! Is is under (MIT) and (Apache 2.0) licence <-->
The 'Snake game' is made by Tim.
For any queries send a e mail on ('[email protected]').
<----------------------------- NOTE -------------------------------->
This is developer pack. VERSION --> (Dev. 2.0 lite)
ENJOY
<------------------------------------------------------------------->
Loading... (If it's not loading quickly this may be because you are starting the program after --> shutdowning , restarting)
''')
import os
import numpy
from pygame import mixer
import time
from tkinter import *
import tkinter.messagebox
from subprocess import *
import webbrowser
import pyttsx3
import webbrowser
import smtplib
import random
import speech_recognition as sr
import wikipedia
import datetime
import wolframalpha
import os
import sys
import time
import win10toast
from subprocess import *
import sounddevice
from scipy.io.wavfile import write
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
from playsound import playsound
import datetime as dt
from gpiozero import Robot
from time import sleep
import win10toast
import time
import pywhatkit as kit
from subprocess import *
import datetime
import time
import sys
from math import floor
import webbrowser
from playsound import playsound
import time
clear = lambda: os.system("cls")
clear()
print("Hello from {CMS} AI community. " + "http://cmsplanes.ezyro.com")
#######################################################################################################
# HEY THERE YOU CAN SPECIFY THE AI , YOUR NAME IN THE FOLLOWING SECTION!
#######################################################################################################
# You can enter your name here -->
User = ('Developer 🤑')
# You can enter the name of the AI here -->
a_name = ("assistant")
########################################################################################################
root=Tk()
root.geometry('500x700')
frame = Frame(root, relief=RIDGE, borderwidth=2)
frame.pack(fill=BOTH,expand=1)
root.title('AI ' + (a_name))
frame.config(background='blue')
label = Label(frame, text="AI " + (a_name),bg='light blue',font=('Times 35 bold'))
label.pack(side=TOP)
filename = PhotoImage(file="Logo.png")
background_label = Label(frame,image=filename)
background_label.pack(side=TOP)
def lite():
tkinter.messagebox.showinfo("Lite mode","Lite mode will help in saving your device battery.")
def lite_a():
tkinter.messagebox.showinfo("Lite mode","Lite mode disabled.")
def hel():
webbrowser.open("http://cmsplanes.ezyro.com")
def Contri():
tkinter.messagebox.showinfo("Contributors","The AI was made by Prasoon rai.")
def playlist():
tkinter.messagebox.showinfo("AI playlist","You can find the music on youtube and you can even download it.")
webbrowser.open("https://music.youtube.com/playlist?list=PL8qJFT6AWCtLHzAbUax9HLxrhkWXpQGOa")
sleep(5)
def anotherWin():
tkinter.messagebox.showinfo("About",'AI\n Made Using\n-Python\n')
menu = Menu(root)
root.config(menu=menu)
subm1 = Menu(menu)
menu.add_cascade(label="Manager",menu=subm1)
subm1.add_command(label="More",command=hel)
subm1.add_command(label="Music playlist",command=playlist)
subm2 = Menu(menu)
menu.add_cascade(label="About",menu=subm2)
subm2.add_command(label="Contributors",command=Contri)
def Open():
clear = lambda: os.system("cls")
    clear()
toaster = win10toast.ToastNotifier()
engine = pyttsx3.init('sapi5')
client = wolframalpha.Client('V7EAL6-JGR56ELT3H')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[len(voices)-3].id)
def speak(audio):
print('AI ' + (a_name) + ':' + audio)
engine.say(audio)
engine.runAndWait()
def greetMe():
currentH = int(datetime.datetime.now().hour)
if currentH >= 0 and currentH < 12:
speak('Good Morning!')
if currentH >= 12 and currentH < 18:
speak('Good Afternoon!')
        if currentH >= 18 and currentH < 21:
            speak('Good Evening!')
        if currentH >= 21:
            speak("Good night!")
playsound('ui-wakesound.mp3')
def myCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
query = r.recognize_google(audio, language='en-in')
print((User) + ':' + query + '\n')
except sr.UnknownValueError:
playsound('ui-endpointing-touch.mp3')
speak('Sorry sir! I didn\'t catch that! Can you please repeat that?')
playsound('ui-wakesound.mp3')
query = myCommand()
return query
if __name__ == '__main__':
n = 0
if n <= 1:
query = myCommand();
query = query.lower()
if 'open youtube' in query:
speak('okay')
webbrowser.open('www.youtube.com')
elif 'open google' in query:
speak('okay')
webbrowser.open('www.google.co.in')
elif "hello" in query or "hi" in query or "hello arduino" in query or "hi arduino" in query:
greet = "Hello there. 😁😎😍"
speak(greet)
elif 'your help' in query or "arduino help" in query:
speak(" help section -")
speak("Help is loading...")
tip = ['I was designed by prasoon rai' , 'I am getting smarter everyday!' , 'I can play music' , 'What\'s up buddy!']
time.sleep(1)
print("#------- " + "12.5%")
time.sleep(2)
print("##------ " + "25%")
time.sleep(2)
print("###----- " + "37.5%")
time.sleep(2)
print("####---- " + "50%")
time.sleep(2)
print("#####--- " + "62.5% " + 'Do you know?: ' + (random.choice(tip)))
time.sleep(2)
print("######-- " +"75%")
time.sleep(2)
print("#######- " + "87.5%")
time.sleep(2)
print("######## " + "100%")
speak("Done!")
playsound('system-alerts-melodic-01-short.mp3')
print('''
<------------------------------------------------------------------------------------------>
help...
Help -
AI was designed by a solo developer, Prasoon rai.
For more information just say 'more information arduino' or 'information arduino'.
What's upcoming? --->
Well, we are working on new updates right now!
What's new -->
1. You can play game's with! like - Hangman, snake game, golf game, drawing pragram!
Well, soon I am going to bring new updates to the AI like a online chess game!
2. It can even recognize your face!
Don\'t worry because your data is safe and will not be leaked.
3. It can send e-mail, set alarms, play music, call anybody, perform many more amazing things.
Just ask 'what can you do?'
4. I can tell you some of my most amazing features here -
(a) Can tell you the weather of anyplace.
(b) Tell you a story.
(c) Tell you joke.
(d) Perform mathematical operations.
(e) Can do things related to science.
(f) I can be your best friend!
A common Tip by AI:
Ok, please don't ask me such kind of questions, like 'can you walk?, can you dance and can you drive a car
etc...
So, my answer to all such questions is |No|, but if you want me to do such things, make any kind
of robotic body you like (using a raspberry PI) and just tell your Ideas to my developer
at his G-mail ID ([email protected]) and you will get a responce from him under 5 days with basic codes...
Well how to run the codes and other important information will be give with a handy guide added with the E-mail
Well, you can Improve me because I am open source!
Thanks!
<------------------------------------------------------------------------------------------->
All the information given in guide.
In order to open the guide just say 'open help guide' or 'Help guide'.
''')
elif "set a alarm" in query or "set a timer" in query:
speak("Loading Alarm...")
time.sleep(1)
Popen('python delta.py')
time.sleep(10)
elif "help guide" in query or "open help guide" in query:
webbrowser.open("http://cmsplanes.ezyro.com")
elif "how do you understand me" in query or "how do you" in query:
speak("I use Deep neural net algorithms to understand what you say . Well see this diagram to see what I mean.")
print('''
(Computer took a input) (intrepreter change) (Computer understood) (Computer gave output) (Human Language)
Human input eg. 'Hello' ----------------------> 01000101000110010011 -----------------------> Hi there!
''')
speak("The following cycle continues between you and me!")
speak("Want to see a trick? Yes/no")
df = myCommand()
if "yes" in df:
Popen('nurallnet.gif')
elif "n" in df:
speak("Ok.")
else:
playsound('system-alerts-melodic-01-short.mp3')
elif "what are your secrets" in query or "what is your secret" in query or "tell me your secrets" in query:
speak("If, I will tell you my secrets. It will not be a secret")
elif "system info" in query or "info" in query:
speak("Loading...")
speak("Info loaded...")
speak("---------------------------------------------------------------------------------------")
speak("No current updates...")
speak("I automatically get updated.")
speak("---------------------------------------------------------------------------------------")
elif "set a alarm" in query or "set a timer" in query:
inputbyuser = input("At What Time Do You Want The Alarm? [Format -> Hour : Minutes]: ")
amPm = input("AM or PM?: ")
if ":" in inputbyuser:
listt = inputbyuser.split(":")
else:
print("Wrong Format")
target_hrs = int(listt[0])
target_mins = int(listt[1])
if target_hrs == 12:
if amPm.lower() == "am":
target_hrs = 0
if amPm.lower() == "pm":
if target_hrs == 12:
target_hrs = target_hrs
else:
target_hrs = int(target_hrs) + 12
elif amPm.lower() == "am":
target_hrs = target_hrs
current_time_min_value = int(datetime.datetime.now().hour) * 3600 + int(datetime.datetime.now().minute) * 60
target_time_min_value = int(target_hrs) * 3600 + int(target_mins) * 60
alarm_in = int(target_time_min_value) - int(current_time_min_value)
h = int(alarm_in) / 3600
h = floor(h)
m = (int(alarm_in)/60) - h*60
m = floor(m) - 1
s = 60 - int(datetime.datetime.now().second)
if alarm_in < 0:
print("...Wrong Time for Alarm...")
else:
                    while alarm_in > 0:
                        sys.stdout.write("\x1b[1A\x1b[2K")
                        print(h, 'Hours', m, 'Minutes', s, "Seconds")
                        time.sleep(1)
                        alarm_in -= 1
                        s -= 1
                        if s == -1:
                            s = 59
                            m -= 1
                            if m == -1:
                                m = 59
                                h -= 1
                        if s == 0 and m == 0 and h == 0:
                            print(".......ALARM SIRENS.......\n...Timer Complete...")
                            playsound('system-alerts-melodic-01-short.mp3')
                            break
elif "open binge" in query or "open bige" in query:
webbrowser.open('https://www.bing.com/')
elif "open opera" in query or "open opera gaming browser" in query or "open opera gx" in query or "open opera GX" in query:
webbrowser.open('http://opera.com')
print('''
-------------------------------------------------------------------------------------------------------
WARNING!
Opera isn't available for commercial use on browsers. You need to download it.
However, the AI can run opera and opera GX locally.
It's safer to run saffari , google , binge , Microsoft edge and Mozila firefox!
What we recommend?
WINDOWS -
Well our AI is released officialy for Windows 10, 10.1 , 10.2 and Windows 7
so, we recomend Binge , Microsoft edge and Google.
Depends upon your default browser!
MAC -
We, recommend safari for MAC
Linux -
Well, we haven't released it officialy for Linux but you can you it on linux
(Codes and installation to be performed indivussialy)
However, for smoother experience we recommend Mozzila Firefox and Google for linux.
------------------------------------------------------------------------------------------------------
Any loss of data, harm to computer or anything else will not be on our risk if you use Opera / Opera GX
or any opera browser!
------------------------------------------------------------------------------------------------------
Thanks!
-------------------------------------------------------------------------------------------------------
''')
elif "play snake game" in query or "start snake game" in query or "snake game" in query:
speak("Starting snake game...")
Popen('snake.py')
elif "open microsoft" in query or "open mycrosoft account" in query or "open my microsoft account" in query:
webbrowser.open_new_tab('https://account.microsoft.com/')
speak("Happy journey! " + ":-)")
elif "clear" in query or "clear chat" in query:
speak("Clearing...")
speak("This may take some time...")
clear()
print('Loading')
time.sleep(2)
clear()
print('Loading.')
time.sleep(2)
clear()
print('Loading..')
time.sleep(2)
clear()
print('Loading...')
time.sleep(2)
clear()
print('Loading')
time.sleep(2)
clear()
clear()
speak("Chat history is deleted...")
clear()
speak("Loading codes...")
time.sleep(2)
print("--------------------------------------------------------------------------------")
speak("Basic cascade loaded")
time.sleep(1)
speak("done")
"""
elif "move forward" in query or "move forveward" in query:
robot.forward()
sleep(5)
robot.stop()
#To make the robo move forward...
elif "move backward" in query or "move bakward" in query:
robot.backward()
sleep(5)
robot.stop()
#To make the robot move backward...
elif "turn left" in query or "move left" in query:
robot.left()
sleep(5)
robot.stop()
#To make the robot move left...
elif "move right" in query or "turn right" in query:
robot.right()
sleep(5)
robot.stop()
#To make the robot move right...
"""
elif "open python drawing program" in query or "python drawing" in query or "open python drawing" in query:
Popen('python main.py')
elif "record my voice" in query or "make a announcement" in query:
speak("Ok, starting to record your voice!")
fs=44100
second=10
print("recording -")
                record_voice = sounddevice.rec(int(second * fs), samplerate=fs, channels=2)
sounddevice.wait()
write("output.wav",fs,record_voice)
elif "open hangman game" in query or "start hangman" in query or "open python hangman game" in query or "open python hangman" in query:
Popen('python hangman.py')
elif "play" in query:
speak("Ok.")
kit.playonyt(query)
elif "remember that" in query or "remember" in query:
speak("Ok, I will remember that you told me,")
speak("To," + (query))
elif "where do i kept" in query or "where is my" in query or "tell me where i kept" in query or "where i kept my" in query:
speak("You told me to remember that -")
speak(query)
elif "show my history" in query or "show my browsing history" in query:
speak("Heres your browsing history!")
time.sleep(15)
elif "send a notification" in query or "send me a notification" in query:
speak("What should I send as a notification?")
notification_con = myCommand()
toaster = win10toast.ToastNotifier()
toaster.show_toast((notification_con) , duration=7)
elif 'open gmail' in query:
speak('okay')
webbrowser.open('www.gmail.com')
elif "open my os" in query or "open rc os" in query or "open plane os" in query or "plane os" in query or "open cms" in query:
Popen('OS.bat')
elif "would you marry me" in query or "will you marry me" in query or "marry me" in query:
speak("Well before answering this question")
speak("You have to answer some questions!")
speak("Are you ready?")
speak("Let's begin!")
speak("1. When is my birthday?")
A_A = myCommand()
if "8 january" in A_A or "eight january" in A_A or "Eight January" in A_A:
speak('Wow! great')
speak("Let's move towards next question!")
else:
("Wrong answer!")
while(1):
break
speak("1. What is my favourite colour?")
B_B = myCommand()
if "red" in B_B or "Red" in B_B:
speak("Correct answer!")
speak("Let's move toward\'s next question")
speak("This one is going to be a bit harder!")
else:
speak("Wrong")
while(1):
break
speak("3. Who made me?")
C_C = myCommand()
if "Prasoon rai" in C_C or "prasoon rai" in C_C:
speak("Great! but")
speak("Right, now I am wrapping my head around the concept of love")
speak("So, for now I would like to answer by this song!")
webbrowser.open('https://www.youtube.com/watch?v=lNmAkWvnWEg')
else:
speak('Nah...')
speak("I, think we need more time to get to know each other!")
elif "what\'s up" in query or 'how are you' in query:
stMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy']
speak(random.choice(stMsgs))
elif "make my notes" in query or "make my school notes" in query:
speak("Let's start making your notes...")
speak("Enter your notes here -")
speak("To exit notes just say, exit notes")
note_content = myCommand()
if "exit notes" in note_content or "exit note" in note_content:
                    pass  # exit the note-taking flow (the original while(1)/quit here hung forever)
elif 'email' in query:
speak('Who is the recipient? ')
recipient = myCommand()
if 'me' in recipient or 'MI' in recipient or "mi" in recipient:
try:
speak('What should I say? ')
content = myCommand()
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login("Your_Mail", 'Passward')
server.sendmail('Your_Mail', "Your_Mail", content)
speak('Email sent!')
server.close()
except:
speak('Sorry Sir! I am unable to send your message at this moment!')
elif 'nothing' in query or 'abort' in query:
speak('okay')
speak('Bye Sir, have a good day.')
sys.exit()
elif 'hello' in query:
speak('Hello Sir')
elif "stop" in query:
                sys.exit()
elif "information AI" in query or "more information AI" in query:
speak("Ok")
elif "Who made you" in query or "who is your master" in query or "who made you" in query:
ans_m = ("Prasoon rai made me... Thanks to him!")
speak(ans_m)
elif "send a whatsapp message" in query or "send a message on whatsapp" in query or "send a message" in query:
speak("What is the recipent's phone number?")
what_no = myCommand()
speak("Sorry, please try again later. 😝😜")
clear()
elif 'bye' in query:
speak('Bye Sir, have a good day.')
sys.exit()
else:
query = query
speak('Searching...')
try:
try:
res = client.query(query)
results = next(res.results).text
speak("Here's what I found on the web.")
speak(results)
except:
results = wikipedia.summary(query, sentences=2)
speak(results)
except:
webbrowser.open('https://www.bing.com/')
speak("Sorry, I am unable to help with that!" + " 🤐")
print('-----------------------------------------------------------------------------------------------------------')
a = ('🎤')
but5=Button(frame,padx=1,pady=5,width=3,bg='white',fg='black',relief=GROOVE,text=a,command=Open,font=('helvetica 15 bold'))
but5.place(x=360,y=590)
root.mainloop()
print('Thanks! for choosing me ' + (User)) |
py | b40a30a5e80a8c60fb14b8dccb25f56dd228e3d2 | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mailslurp_client.configuration import Configuration
class PageExpiredInboxRecordProjection(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'content': 'list[ExpiredInboxRecordProjection]',
'empty': 'bool',
'first': 'bool',
'last': 'bool',
'number': 'int',
'number_of_elements': 'int',
'pageable': 'Pageable',
'size': 'int',
'sort': 'Sort',
'total_elements': 'int',
'total_pages': 'int'
}
attribute_map = {
'content': 'content',
'empty': 'empty',
'first': 'first',
'last': 'last',
'number': 'number',
'number_of_elements': 'numberOfElements',
'pageable': 'pageable',
'size': 'size',
'sort': 'sort',
'total_elements': 'totalElements',
'total_pages': 'totalPages'
}
def __init__(self, content=None, empty=None, first=None, last=None, number=None, number_of_elements=None, pageable=None, size=None, sort=None, total_elements=None, total_pages=None, local_vars_configuration=None): # noqa: E501
"""PageExpiredInboxRecordProjection - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content = None
self._empty = None
self._first = None
self._last = None
self._number = None
self._number_of_elements = None
self._pageable = None
self._size = None
self._sort = None
self._total_elements = None
self._total_pages = None
self.discriminator = None
if content is not None:
self.content = content
if empty is not None:
self.empty = empty
if first is not None:
self.first = first
if last is not None:
self.last = last
if number is not None:
self.number = number
if number_of_elements is not None:
self.number_of_elements = number_of_elements
if pageable is not None:
self.pageable = pageable
if size is not None:
self.size = size
if sort is not None:
self.sort = sort
if total_elements is not None:
self.total_elements = total_elements
if total_pages is not None:
self.total_pages = total_pages
@property
def content(self):
"""Gets the content of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The content of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: list[ExpiredInboxRecordProjection]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this PageExpiredInboxRecordProjection.
:param content: The content of this PageExpiredInboxRecordProjection. # noqa: E501
:type: list[ExpiredInboxRecordProjection]
"""
self._content = content
@property
def empty(self):
"""Gets the empty of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The empty of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: bool
"""
return self._empty
@empty.setter
def empty(self, empty):
"""Sets the empty of this PageExpiredInboxRecordProjection.
:param empty: The empty of this PageExpiredInboxRecordProjection. # noqa: E501
:type: bool
"""
self._empty = empty
@property
def first(self):
"""Gets the first of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The first of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: bool
"""
return self._first
@first.setter
def first(self, first):
"""Sets the first of this PageExpiredInboxRecordProjection.
:param first: The first of this PageExpiredInboxRecordProjection. # noqa: E501
:type: bool
"""
self._first = first
@property
def last(self):
"""Gets the last of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The last of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: bool
"""
return self._last
@last.setter
def last(self, last):
"""Sets the last of this PageExpiredInboxRecordProjection.
:param last: The last of this PageExpiredInboxRecordProjection. # noqa: E501
:type: bool
"""
self._last = last
@property
def number(self):
"""Gets the number of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The number of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: int
"""
return self._number
@number.setter
def number(self, number):
"""Sets the number of this PageExpiredInboxRecordProjection.
:param number: The number of this PageExpiredInboxRecordProjection. # noqa: E501
:type: int
"""
self._number = number
@property
def number_of_elements(self):
"""Gets the number_of_elements of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The number_of_elements of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: int
"""
return self._number_of_elements
@number_of_elements.setter
def number_of_elements(self, number_of_elements):
"""Sets the number_of_elements of this PageExpiredInboxRecordProjection.
:param number_of_elements: The number_of_elements of this PageExpiredInboxRecordProjection. # noqa: E501
:type: int
"""
self._number_of_elements = number_of_elements
@property
def pageable(self):
"""Gets the pageable of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The pageable of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: Pageable
"""
return self._pageable
@pageable.setter
def pageable(self, pageable):
"""Sets the pageable of this PageExpiredInboxRecordProjection.
:param pageable: The pageable of this PageExpiredInboxRecordProjection. # noqa: E501
:type: Pageable
"""
self._pageable = pageable
@property
def size(self):
"""Gets the size of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The size of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this PageExpiredInboxRecordProjection.
:param size: The size of this PageExpiredInboxRecordProjection. # noqa: E501
:type: int
"""
self._size = size
@property
def sort(self):
"""Gets the sort of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The sort of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: Sort
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this PageExpiredInboxRecordProjection.
:param sort: The sort of this PageExpiredInboxRecordProjection. # noqa: E501
:type: Sort
"""
self._sort = sort
@property
def total_elements(self):
"""Gets the total_elements of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The total_elements of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: int
"""
return self._total_elements
@total_elements.setter
def total_elements(self, total_elements):
"""Sets the total_elements of this PageExpiredInboxRecordProjection.
:param total_elements: The total_elements of this PageExpiredInboxRecordProjection. # noqa: E501
:type: int
"""
self._total_elements = total_elements
@property
def total_pages(self):
"""Gets the total_pages of this PageExpiredInboxRecordProjection. # noqa: E501
:return: The total_pages of this PageExpiredInboxRecordProjection. # noqa: E501
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""Sets the total_pages of this PageExpiredInboxRecordProjection.
:param total_pages: The total_pages of this PageExpiredInboxRecordProjection. # noqa: E501
:type: int
"""
self._total_pages = total_pages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageExpiredInboxRecordProjection):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PageExpiredInboxRecordProjection):
return True
return self.to_dict() != other.to_dict()
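# --- Hypothetical usage sketch (not part of the generated client) ---
# Instances of this model are normally returned by the MailSlurp API client,
# but the generated class can also be built directly; the field values below
# are made up for illustration.
#
#   page = PageExpiredInboxRecordProjection(content=[], empty=True, first=True,
#                                           last=True, number=0, size=20,
#                                           total_elements=0, total_pages=0)
#   print(page.to_dict())
#   assert page == PageExpiredInboxRecordProjection(**page.to_dict())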
|
py | b40a3134f98d743042f4846b2b9fa84fb32f093d | ###############################################################################
# Copyright 2018 The AnPyLar Team. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that
# can be found in the LICENSE file at http://anpylar.com/mit-license
###############################################################################
from anpylar import Module
from .app_component import AppComponent
from .compose_message import ComposeMessageComponent
from .page_not_found_component import PageNotFoundComponent
from .admin import AdminModule
from .disaster_center import DisasterCenterModule
from .pyroes import PyroesModule
from .login import LoginModule
from .auth_service import AuthService
from .dialog_service import DialogService
class AppModule(Module):
modules = LoginModule, PyroesModule
components = AppComponent
bindings = {}
services = {
'auth_service': AuthService,
'dialog_service': DialogService,
}
routes = [
{'path': 'compose', 'component': ComposeMessageComponent,
'outlet': 'popup'},
{'path': 'disaster-center', 'load_children': [DisasterCenterModule]},
{'path': 'admin', 'load_children': [AdminModule]},
{'path': '', 'redirect_to': '/superpyroes', 'path_match': 'full'},
{'path': '*', 'component': PageNotFoundComponent},
]
|
py | b40a3152937efab2d8abd5955e56019f8d58f5f8 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Nature Test Case"""
from typing import Optional
from abc import ABC
import warnings
import inspect
import logging
import os
import unittest
import time
# disable deprecation warnings that can cause log output overflow
# pylint: disable=unused-argument
def _noop(*args, **kargs):
pass
# disable warning messages
# warnings.warn = _noop
class QiskitNatureTestCase(unittest.TestCase, ABC):
"""Nature Test Case"""
moduleName = None
log = None
def setUp(self) -> None:
warnings.filterwarnings("default", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning, module="pyscf")
warnings.filterwarnings(action="ignore", category=DeprecationWarning, module=".*drivers*")
warnings.filterwarnings(
action="default", category=DeprecationWarning, module=".*drivers.second_quantization*"
)
warnings.filterwarnings(
action="ignore", category=DeprecationWarning, module=".*transformers*"
)
warnings.filterwarnings(
action="default",
category=DeprecationWarning,
module=".*transformers.second_quantization*",
)
self._started_at = time.time()
self._class_location = __file__
def tearDown(self) -> None:
elapsed = time.time() - self._started_at
if elapsed > 5.0:
print(f"({round(elapsed, 2):.2f}s)", flush=True)
@classmethod
def setUpClass(cls) -> None:
cls.moduleName = os.path.splitext(inspect.getfile(cls))[0]
cls.log = logging.getLogger(cls.__name__)
# Set logging to file and stdout if the LOG_LEVEL environment variable
# is set.
if os.getenv("LOG_LEVEL"):
# Set up formatter.
log_fmt = f"{cls.__name__}.%(funcName)s:%(levelname)s:%(asctime)s:" " %(message)s"
formatter = logging.Formatter(log_fmt)
# Set up the file handler.
log_file_name = f"{cls.moduleName}.log"
file_handler = logging.FileHandler(log_file_name)
file_handler.setFormatter(formatter)
cls.log.addHandler(file_handler)
# Set the logging level from the environment variable, defaulting
# to INFO if it is not a valid level.
level = logging._nameToLevel.get(os.getenv("LOG_LEVEL"), logging.INFO)
cls.log.setLevel(level)
def get_resource_path(self, filename: str, path: Optional[str] = None) -> str:
"""Get the absolute path to a resource.
Args:
filename: filename or relative path to the resource.
path: path used as relative to the filename.
Returns:
str: the absolute path to the resource.
"""
root = os.path.dirname(self._class_location)
path = root if path is None else os.path.join(root, path)
return os.path.normpath(os.path.join(path, filename))
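# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal test case built on the base class above; the resource file name
# and sub-directory are placeholders.
#
#   class TestExample(QiskitNatureTestCase):
#       def test_resource_path(self):
#           path = self.get_resource_path("molecule.json", "resources")
#           self.assertTrue(path.endswith("molecule.json"))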
|
py | b40a3157f61570124500f041b9792673ad211d25 | #Dan Blankenberg
class fastaSequence( object ):
def __init__( self ):
self.identifier = None
self.sequence = '' #holds raw sequence string: no whitespace
def __len__( self ):
return len( self.sequence )
def __str__( self ):
return "%s\n%s\n" % ( self.identifier, self.sequence )
class fastaReader( object ):
def __init__( self, fh ):
self.file = fh
def close( self ):
return self.file.close()
def next( self ):
line = self.file.readline()
#remove header comment lines
while line and line.startswith( '#' ):
line = self.file.readline()
if not line:
raise StopIteration
assert line.startswith( '>' ), "FASTA headers must start with >"
rval = fastaSequence()
rval.identifier = line.strip()
offset = self.file.tell()
while True:
line = self.file.readline()
if not line or line.startswith( '>' ):
if line:
self.file.seek( offset ) #this causes sequence id lines to be read twice, once to determine previous sequence end and again when getting actual sequence; can we cache this to prevent it from being re-read?
return rval
#454 qual test data that was used has decimal scores that don't have trailing spaces
#so we'll need to parse and build these sequences not based upon de facto standards
#i.e. in a less than ideal fashion
line = line.rstrip()
if ' ' in rval.sequence or ' ' in line:
rval.sequence = "%s%s " % ( rval.sequence, line )
else:
rval.sequence += line
offset = self.file.tell()
def __iter__( self ):
while True:
yield self.next()
class fastaNamedReader( object ):
def __init__( self, fh ):
self.file = fh
self.reader = fastaReader( self.file )
self.offset_dict = {}
self.eof = False
def close( self ):
return self.file.close()
def get( self, sequence_id ):
if not isinstance( sequence_id, basestring ):
sequence_id = sequence_id.identifier
rval = None
if sequence_id in self.offset_dict:
initial_offset = self.file.tell()
seq_offset = self.offset_dict[ sequence_id ].pop( 0 )
if not self.offset_dict[ sequence_id ]:
del self.offset_dict[ sequence_id ]
self.file.seek( seq_offset )
rval = self.reader.next()
self.file.seek( initial_offset )
else:
while True:
offset = self.file.tell()
try:
fasta_seq = self.reader.next()
except StopIteration:
self.eof = True
break #eof, id not found, will return None
if fasta_seq.identifier == sequence_id:
rval = fasta_seq
break
else:
if fasta_seq.identifier not in self.offset_dict:
self.offset_dict[ fasta_seq.identifier ] = []
self.offset_dict[ fasta_seq.identifier ].append( offset )
return rval
def has_data( self ):
#returns a string representation of remaining data, or empty string (False) if no data remaining
eof = self.eof
count = 0
rval = ''
if self.offset_dict:
count = sum( map( len, self.offset_dict.values() ) )
if not eof:
offset = self.file.tell()
try:
fasta_seq = self.reader.next()
except StopIteration:
eof = True
self.file.seek( offset )
if count:
rval = "There were %i known sequences not utilized. " % count
if not eof:
rval = "%s%s" % ( rval, "An additional unknown number of sequences exist in the input that were not utilized." )
return rval
class fastaWriter( object ):
def __init__( self, fh ):
self.file = fh
def write( self, fastq_read ):
#this will include color space adapter base if applicable
self.file.write( ">%s\n%s\n" % ( fastq_read.identifier[1:], fastq_read.sequence ) )
def close( self ):
return self.file.close()
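# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of iterating a FASTA file with fastaReader; the file name
# is a placeholder and the module's own Python 2 era spacing style is kept.
#
#   for seq in fastaReader( open( 'input.fasta' ) ):
#       print( "%s has %i bases" % ( seq.identifier, len( seq ) ) )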
|
py | b40a31f7f2f22308de14633d779342f96c18c2e8 | # encoding: UTF-8
"""
Residual period crossover strategy:
"""
from __future__ import division
from vnpy.app.cta_strategy.ctaTemplatePatch import CtaTemplatePatch
import talib as ta
########################################################################
class LinearStrategy(CtaTemplatePatch):
"""残差周期交叉策略"""
className = 'LinearStrategy'
author = u'renxg'
regPeriod = 60
residualSmaPeriod = 12
residualLmaPeriod = 36
parameters = CtaTemplatePatch.parameters + [
'regPeriod', 'residualSmaPeriod', 'residualLmaPeriod'
]
#----------------------------------------------------------------------
def onXminBar(self, bar):
"""收到X分钟K线"""
super(LinearStrategy, self).onXminBar(bar)
if not self.trading:
return
if not self.am.inited:
return
        # emit a status update event
if self.trading:
direction = self.getSignalPos()
if self.pos == 0:
                # no position: open a new one
self.filterTrade(direction)
else:
                # holding the opposite position: close it (do not close when there is no signal)
if self.direction * direction < 0:
self.clearOrder()
#----------------------------------------------------------------------
def filterTrade(self, direction):
"""按规则过滤交易"""
if direction == 0:
return
self.trade(self.fixedSize * direction)
self.put_event()
#----------------------------------------------------------------------
def getSignalPos(self):
"""计算指标数据"""
        # indicator calculation
am = self.am
prediction = ta.LINEARREG(am.close, self.regPeriod)
residual = (am.close - prediction)
residualSma = ta.MA(residual, self.residualSmaPeriod)
residualLma = ta.MA(residual, self.residualLmaPeriod)
residualUp = residualSma[-1] > residualLma[-1]
residualDn = residualSma[-1] < residualLma[-1]
        # entry/exit logic
if (residualUp):
return 1
if (residualDn):
return -1
return 0
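    # --- Hypothetical illustration (not part of the original strategy) ---
    # getSignalPos() returns +1 while the 12-period MA of the residual
    # (close minus its 60-period linear regression) is above the 36-period MA,
    # and -1 in the opposite case; onXminBar then opens a position when flat
    # and closes it when the held direction opposes the signal.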
|
py | b40a32747be8a614943a87d1215e8d0787dfa9a7 | """Module for the Accessory classes."""
import itertools
import logging
from uuid import UUID
from pyhap import SUPPORT_QR_CODE, util
from pyhap.const import (
CATEGORY_BRIDGE,
CATEGORY_OTHER,
HAP_REPR_AID,
HAP_REPR_IID,
HAP_PROTOCOL_VERSION,
HAP_REPR_SERVICES,
HAP_REPR_VALUE,
STANDALONE_AID,
)
from pyhap.iid_manager import IIDManager
from pyhap.service import Service
if SUPPORT_QR_CODE:
import base36
from pyqrcode import QRCode
HAP_PROTOCOL_INFORMATION_SERVICE_UUID = UUID("000000A2-0000-1000-8000-0026BB765291")
logger = logging.getLogger(__name__)
class Accessory:
"""A representation of a HAP accessory.
Inherit from this class to build your own accessories.
"""
category = CATEGORY_OTHER
def __init__(self, driver, display_name, aid=None):
"""Initialise with the given properties.
:param display_name: Name to be displayed in the Home app.
:type display_name: str
:param aid: The accessory ID, uniquely identifying this accessory.
`Accessories` that advertised on the network must have the
standalone AID. Defaults to None, in which case the `AccessoryDriver`
will assign the standalone AID to this `Accessory`.
:type aid: int
"""
self.aid = aid
self.display_name = display_name
self.driver = driver
self.services = []
self.iid_manager = IIDManager()
self.add_info_service()
if aid == STANDALONE_AID:
self.add_protocol_version_service()
def __repr__(self):
"""Return the representation of the accessory."""
services = [s.display_name for s in self.services]
return "<accessory display_name='{}' services={}>".format(
self.display_name, services
)
@property
def available(self):
"""Accessory is available.
If available is False, get_characteristics will return
SERVICE_COMMUNICATION_FAILURE for the accessory which will
show as unavailable.
Expected to be overridden.
"""
return True
def add_info_service(self):
"""Helper method to add the required `AccessoryInformation` service.
Called in `__init__` to be sure that it is the first service added.
May be overridden.
"""
serv_info = self.driver.loader.get_service("AccessoryInformation")
serv_info.configure_char("Name", value=self.display_name)
serv_info.configure_char("SerialNumber", value="default")
self.add_service(serv_info)
def add_protocol_version_service(self):
"""Helper method to add the required HAP Protocol Information service"""
serv_hap_proto_info = Service(
HAP_PROTOCOL_INFORMATION_SERVICE_UUID,
"HAPProtocolInformation"
)
serv_hap_proto_info.add_characteristic(self.driver.loader.get_char("Version"))
serv_hap_proto_info.configure_char("Version", value=HAP_PROTOCOL_VERSION)
self.add_service(serv_hap_proto_info)
def set_info_service(
self, firmware_revision=None, manufacturer=None, model=None, serial_number=None
):
"""Quick assign basic accessory information."""
serv_info = self.get_service("AccessoryInformation")
if firmware_revision:
serv_info.configure_char("FirmwareRevision", value=firmware_revision)
if manufacturer:
serv_info.configure_char("Manufacturer", value=manufacturer)
if model:
serv_info.configure_char("Model", value=model)
if serial_number is not None:
if len(serial_number) >= 1:
serv_info.configure_char("SerialNumber", value=serial_number)
else:
logger.warning(
"Couldn't add SerialNumber for %s. The SerialNumber must "
"be at least one character long.",
self.display_name,
)
def add_preload_service(self, service, chars=None):
"""Create a service with the given name and add it to this acc."""
service = self.driver.loader.get_service(service)
if chars:
chars = chars if isinstance(chars, list) else [chars]
for char_name in chars:
char = self.driver.loader.get_char(char_name)
service.add_characteristic(char)
self.add_service(service)
return service
def set_primary_service(self, primary_service):
"""Set the primary service of the acc."""
for service in self.services:
service.is_primary_service = service.type_id == primary_service.type_id
def add_service(self, *servs):
"""Add the given services to this Accessory.
This also assigns unique IIDS to the services and their Characteristics.
.. note:: Do not add or remove characteristics from services that have been added
to an Accessory, as this will lead to inconsistent IIDs.
:param servs: Variable number of services to add to this Accessory.
:type: Service
"""
for s in servs:
self.services.append(s)
self.iid_manager.assign(s)
s.broker = self
for c in s.characteristics:
self.iid_manager.assign(c)
c.broker = self
def get_service(self, name):
"""Return a Service with the given name.
A single Service is returned even if more than one Service with the same name
are present.
:param name: The display_name of the Service to search for.
:type name: str
:return: A Service with the given name or None if no such service exists in this
Accessory.
:rtype: Service
"""
return next((s for s in self.services if s.display_name == name), None)
def xhm_uri(self):
"""Generates the X-HM:// uri (Setup Code URI)
:rtype: str
"""
payload = 0
payload |= 0 & 0x7 # version
payload <<= 4
payload |= 0 & 0xF # reserved bits
payload <<= 8
payload |= self.category & 0xFF # category
payload <<= 4
payload |= 2 & 0xF # flags
payload <<= 27
payload |= (
int(self.driver.state.pincode.replace(b"-", b""), 10) & 0x7FFFFFFF
) # pincode
encoded_payload = base36.dumps(payload).upper()
encoded_payload = encoded_payload.rjust(9, "0")
return "X-HM://" + encoded_payload + self.driver.state.setup_id
def get_characteristic(self, aid, iid):
"""Get the characteristic for the given IID.
The AID is used to verify if the search is in the correct accessory.
"""
if aid != self.aid:
return None
return self.iid_manager.get_obj(iid)
def to_HAP(self):
"""A HAP representation of this Accessory.
:return: A HAP representation of this accessory. For example:
.. code-block:: python
{ "aid": 1,
"services": [{
"iid" 2,
"type": ...,
...
}]
}
:rtype: dict
"""
return {
HAP_REPR_AID: self.aid,
HAP_REPR_SERVICES: [s.to_HAP() for s in self.services],
}
def setup_message(self):
"""Print setup message to console.
For QRCode `base36`, `pyqrcode` are required.
Installation through `pip install HAP-python[QRCode]`
"""
pincode = self.driver.state.pincode.decode()
if SUPPORT_QR_CODE:
xhm_uri = self.xhm_uri()
print("Setup payload: {}".format(xhm_uri), flush=True)
print(
"Scan this code with your HomeKit app on your iOS device:", flush=True
)
print(QRCode(xhm_uri).terminal(quiet_zone=2), flush=True)
print(
"Or enter this code in your HomeKit app on your iOS device: "
"{}".format(pincode),
flush=True,
)
else:
print(
"To use the QR Code feature, use 'pip install " "HAP-python[QRCode]'",
flush=True,
)
print(
"Enter this code in your HomeKit app on your iOS device: {}".format(
pincode
),
flush=True,
)
@staticmethod
def run_at_interval(seconds):
"""Decorator that runs decorated method every x seconds, until stopped.
Can be used with normal and async methods.
.. code-block:: python
@Accessory.run_at_interval(3)
def run(self):
print("Hello again world!")
:param seconds: The amount of seconds to wait for the event to be set.
Determines the interval on which the decorated method will be called.
:type seconds: float
"""
def _repeat(func):
async def _wrapper(self, *args):
while True:
await self.driver.async_add_job(func, self, *args)
if await util.event_wait(self.driver.aio_stop_event, seconds):
break
return _wrapper
return _repeat
async def run(self):
"""Called when the Accessory should start doing its thing.
Called when HAP server is running, advertising is set, etc.
Can be overridden with a normal or async method.
"""
async def start(self):
"""Called to do any startup an accessory requires
Can be overridden with a normal or async method.
"""
async def stop(self):
"""Called when the Accessory should stop what is doing and clean up any resources.
Can be overridden with a normal or async method.
"""
# Driver
def publish(self, value, sender, sender_client_addr=None, immediate=False):
"""Append AID and IID of the sender and forward it to the driver.
Characteristics call this method to send updates.
:param data: Data to publish, usually from a Characteristic.
:type data: dict
:param sender: The Service or Characteristic from which the call originated.
:type: Service or Characteristic
"""
acc_data = {
HAP_REPR_AID: self.aid,
HAP_REPR_IID: self.iid_manager.get_iid(sender),
HAP_REPR_VALUE: value,
}
self.driver.publish(acc_data, sender_client_addr, immediate)
class Bridge(Accessory):
"""A representation of a HAP bridge.
A `Bridge` can have multiple `Accessories`.
"""
category = CATEGORY_BRIDGE
def __init__(self, driver, display_name):
super().__init__(driver, display_name, aid=STANDALONE_AID)
self.accessories = {} # aid: acc
def add_accessory(self, acc):
"""Add the given ``Accessory`` to this ``Bridge``.
Every ``Accessory`` in a ``Bridge`` must have an AID and this AID must be
unique among all the ``Accessories`` in the same `Bridge`. If the given
``Accessory``'s AID is None, a unique AID will be assigned to it. Otherwise,
it will be verified that the AID is not the standalone aid (``STANDALONE_AID``)
and that there is no other ``Accessory`` already in this ``Bridge`` with that AID.
.. note:: A ``Bridge`` cannot be added to another ``Bridge``.
:param acc: The ``Accessory`` to be bridged.
:type acc: Accessory
:raise ValueError: When the given ``Accessory`` is of category ``CATEGORY_BRIDGE``
or if the AID of the ``Accessory`` clashes with another ``Accessory`` already in this
``Bridge``.
"""
if acc.category == CATEGORY_BRIDGE:
raise ValueError("Bridges cannot be bridged")
if acc.aid is None:
# For some reason AID=7 gets unsupported. See issue #61
acc.aid = next(
aid
for aid in itertools.count(2)
if aid != 7 and aid not in self.accessories
)
elif acc.aid == self.aid or acc.aid in self.accessories:
raise ValueError("Duplicate AID found when attempting to add accessory")
self.accessories[acc.aid] = acc
def to_HAP(self):
"""Returns a HAP representation of itself and all contained accessories.
.. seealso:: Accessory.to_HAP
"""
return [acc.to_HAP() for acc in (super(), *self.accessories.values())]
def get_characteristic(self, aid, iid):
""".. seealso:: Accessory.to_HAP"""
if self.aid == aid:
return self.iid_manager.get_obj(iid)
acc = self.accessories.get(aid)
if acc is None:
return None
return acc.get_characteristic(aid, iid)
async def run(self):
"""Schedule tasks for each of the accessories' run method."""
for acc in self.accessories.values():
self.driver.async_add_job(acc.run)
async def stop(self):
"""Calls stop() on all contained accessories."""
await self.driver.async_add_job(super().stop)
for acc in self.accessories.values():
await self.driver.async_add_job(acc.stop)
def get_topic(aid, iid):
return str(aid) + "." + str(iid)
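# Illustrative usage sketch (not part of the original module): how the classes above
# are typically combined. The AccessoryDriver import path and the 'TemperatureSensor'
# service/characteristic names are assumptions based on common HAP-python examples
# and may differ in your installed version.
#
#     from pyhap.accessory_driver import AccessoryDriver
#
#     class FakeSensor(Accessory):
#         def __init__(self, *args, **kwargs):
#             super().__init__(*args, **kwargs)
#             serv = self.add_preload_service("TemperatureSensor")
#             self.char_temp = serv.configure_char("CurrentTemperature")
#
#         @Accessory.run_at_interval(3)
#         def run(self):
#             self.char_temp.set_value(21.5)
#
#     driver = AccessoryDriver(port=51826)
#     bridge = Bridge(driver, "My Bridge")
#     bridge.add_accessory(FakeSensor(driver, "Sensor"))
#     driver.add_accessory(accessory=bridge)
#     driver.start()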
|
py | b40a32b686435b8fc1a9f1c07fbd948517b86d1f | from montague import load_app as montague_loadapp
from paste.deploy import loadapp as paste_loadapp
import os
import montague_testapps
here = os.path.dirname(__file__)
def test_main():
app = paste_loadapp('config:sample_configs/basic_app.ini',
relative_to=here)
assert app is montague_testapps.apps.basic_app
app = montague_loadapp(os.path.join(here, 'sample_configs/basic_app.ini'))
assert app is montague_testapps.apps.basic_app
app = paste_loadapp('config:sample_configs/basic_app.ini',
relative_to=here, name='main')
assert app is montague_testapps.apps.basic_app
app = montague_loadapp(os.path.join(here, 'sample_configs/basic_app.ini'),
name='main')
assert app is montague_testapps.apps.basic_app
def test_other():
app = paste_loadapp('config:sample_configs/basic_app.ini',
relative_to=here, name='other')
assert app is montague_testapps.apps.basic_app2
app = montague_loadapp(os.path.join(here, 'sample_configs/basic_app.ini'),
name='other')
assert app is montague_testapps.apps.basic_app2
def test_composit():
app = paste_loadapp('config:sample_configs/basic_app.ini',
relative_to=here, name='remote_addr')
assert isinstance(app, montague_testapps.apps.RemoteAddrDispatch)
assert app.map['127.0.0.1'] is montague_testapps.apps.basic_app
assert app.map['0.0.0.0'] is montague_testapps.apps.basic_app2
app = montague_loadapp(os.path.join(here, 'sample_configs/basic_app.ini'),
name='remote_addr')
assert isinstance(app, montague_testapps.apps.RemoteAddrDispatch)
assert app.map['127.0.0.1'] is montague_testapps.apps.basic_app
assert app.map['0.0.0.0'] is montague_testapps.apps.basic_app2
|
py | b40a32beba3f2ea2bce95721710001e467bf2d19 | """
The Python compiler only supports {:extern} code on a module level, so the
entire module must be supplied.
"""
import sys, _dafny
assert "Library" == __name__
Library = sys.modules[__name__]
class LibClass:
@staticmethod
def CallMeInt(x):
y = x + 1
z = y + y
return (y, z)
@staticmethod
def CallMeNative(x, b):
if b:
y = x + 1
else:
y = x - 1
return y
class OtherClass:
@staticmethod
def CallMe():
return "OtherClass.CallMe"
class AllDafny:
@staticmethod
def M():
_dafny.print(_dafny.Seq("AllDafny.M\n"))
class Mixed:
def ctor__(self):
pass
@staticmethod
def M():
_dafny.print(_dafny.Seq("Extern static method says: "))
Library.Mixed.P()
@staticmethod
def P():
_dafny.print(_dafny.Seq("Mixed.P\n"))
def IM(self):
_dafny.print(_dafny.Seq("Extern instance method says: "))
(self).IP()
def IP(self):
_dafny.print(_dafny.Seq("Mixed.IP\n"))
@staticmethod
def F():
return (1000) + (Library.Mixed.G())
@staticmethod
def G():
return 1
def IF(self):
return (2000) + ((self).IG())
def IG(self):
return 2
class AllExtern:
@staticmethod
def P():
_dafny.print(_dafny.Seq("AllExtern.P\n"))
|
py | b40a33021056b32a2fa654b6d374f4e585d0623a | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Deft developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class DeftTestFramework(object):
"""Base class for a deft test script.
Individual deft test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave deftds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop deftds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing deftd/deft-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: deftds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
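# Illustrative sketch only: a minimal test script following the override pattern
# described in the class docstring above. The node-level RPC calls used here
# (generate, getblockcount) are assumptions for demonstration and may differ.
#
#     class ExampleTest(DeftTestFramework):
#         def set_test_params(self):
#             self.num_nodes = 2
#             self.setup_clean_chain = True
#         def run_test(self):
#             self.nodes[0].generate(10)
#             self.sync_all()
#             assert_equal(self.nodes[1].getblockcount(), 10)
#
#     if __name__ == '__main__':
#         ExampleTest().main()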
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
def start_node(self, i, extra_args=None, stderr=None):
"""Start a deftd"""
node = self.nodes[i]
node.start(extra_args, stderr)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None):
"""Start multiple deftds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i])
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
assert 'deftd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "deftd should have exited with an error"
else:
assert_msg = "deftd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("DeftRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(self.options.cachedir, "node" + str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("LITECOIND", "deftd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(self.options.cachedir, i, "debug.log"))
os.remove(log_filename(self.options.cachedir, i, "db.log"))
os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))
for i in range(self.num_nodes):
from_dir = os.path.join(self.options.cachedir, "node" + str(i))
to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(DeftTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some deftd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LITECOIND", "deftd"),
help="deftd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("LITECOIND", "deftd"),
help="deftd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
py | b40a33dbd4e0d50421511dbcf91810326760b6cb | #! /usr/bin/env python
import os
import re
import sys
import time
import random
import logging
import tempfile
import subprocess
import shutil
import argparse
# params overwrite priority:
# for default:
# default_params < {blackbox,whitebox}_default_params < args
# for simple:
# default_params < {blackbox,whitebox}_default_params <
# simple_default_params <
# {blackbox,whitebox}_simple_default_params < args
expected_values_file = tempfile.NamedTemporaryFile()
default_params = {
"acquire_snapshot_one_in": 10000,
"block_size": 16384,
"cache_size": 1048576,
"clear_column_family_one_in": 0,
"delpercent": 5,
"destroy_db_initially": 0,
"expected_values_path": expected_values_file.name,
"max_background_compactions": 20,
"max_bytes_for_level_base": 10485760,
"max_key": 100000000,
"max_write_buffer_number": 3,
"mmap_read": lambda: random.randint(0, 1),
"nooverwritepercent": 1,
"open_files": 500000,
"prefixpercent": 5,
"progress_reports": 0,
"readpercent": 45,
"reopen": 20,
"snapshot_hold_ops": 100000,
"subcompactions": lambda: random.randint(1, 4),
"target_file_size_base": 2097152,
"target_file_size_multiplier": 2,
"use_full_merge_v1": lambda: random.randint(0, 1),
"use_merge": lambda: random.randint(0, 1),
"verify_checksum": 1,
"write_buffer_size": 4 * 1024 * 1024,
"writepercent": 35,
}
def get_dbname(test_name):
test_tmpdir = os.environ.get("TEST_TMPDIR")
if test_tmpdir is None or test_tmpdir == "":
dbname = tempfile.mkdtemp(prefix='rocksdb_crashtest_' + test_name)
else:
dbname = test_tmpdir + "/rocksdb_crashtest_" + test_name
shutil.rmtree(dbname, True)
return dbname
blackbox_default_params = {
# total time for this script to test db_stress
"duration": 6000,
# time for one db_stress instance to run
"interval": 120,
# since we will be killing anyway, use large value for ops_per_thread
"ops_per_thread": 100000000,
"set_options_one_in": 10000,
"test_batches_snapshots": 1,
}
whitebox_default_params = {
"duration": 10000,
"log2_keys_per_lock": 10,
"ops_per_thread": 200000,
"random_kill_odd": 888887,
"test_batches_snapshots": lambda: random.randint(0, 1),
}
simple_default_params = {
"allow_concurrent_memtable_write": lambda: random.randint(0, 1),
"column_families": 1,
"max_background_compactions": 1,
"max_bytes_for_level_base": 67108864,
"memtablerep": "skip_list",
"prefix_size": 0,
"prefixpercent": 0,
"readpercent": 50,
"target_file_size_base": 16777216,
"target_file_size_multiplier": 1,
"test_batches_snapshots": 0,
"write_buffer_size": 32 * 1024 * 1024,
}
blackbox_simple_default_params = {
"open_files": -1,
"set_options_one_in": 0,
}
whitebox_simple_default_params = {}
def finalize_and_sanitize(src_params):
dest_params = dict([(k, v() if callable(v) else v)
for (k, v) in src_params.items()])
if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
dest_params["memtablerep"] = "skip_list"
return dest_params
def gen_cmd_params(args):
params = {}
params.update(default_params)
if args.test_type == 'blackbox':
params.update(blackbox_default_params)
if args.test_type == 'whitebox':
params.update(whitebox_default_params)
if args.simple:
params.update(simple_default_params)
if args.test_type == 'blackbox':
params.update(blackbox_simple_default_params)
if args.test_type == 'whitebox':
params.update(whitebox_simple_default_params)
for k, v in vars(args).items():
if v is not None:
params[k] = v
return params
def gen_cmd(params, unknown_params):
cmd = ['./db_stress'] + [
'--{0}={1}'.format(k, v)
for k, v in finalize_and_sanitize(params).items()
if k not in set(['test_type', 'simple', 'duration', 'interval',
'random_kill_odd'])
and v is not None] + unknown_params
return cmd
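# Illustrative invocations (the script path assumes a typical RocksDB checkout and
# the flag values are examples only; unrecognized flags are forwarded verbatim to
# ./db_stress by gen_cmd above):
#
#     python tools/db_crashtest.py blackbox --simple --interval=60
#     python tools/db_crashtest.py whitebox --duration=3600 --write_buffer_size=1048576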
# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args, unknown_args):
cmd_params = gen_cmd_params(args)
dbname = get_dbname('blackbox')
exit_time = time.time() + cmd_params['duration']
print("Running blackbox-crash-test with \n"
+ "interval_between_crash=" + str(cmd_params['interval']) + "\n"
+ "total-duration=" + str(cmd_params['duration']) + "\n")
while time.time() < exit_time:
run_had_errors = False
killtime = time.time() + cmd_params['interval']
cmd = gen_cmd(dict(
cmd_params.items() +
{'db': dbname}.items()), unknown_args)
child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
print("Running db_stress with pid=%d: %s\n\n"
% (child.pid, ' '.join(cmd)))
stop_early = False
while time.time() < killtime:
if child.poll() is not None:
print("WARNING: db_stress ended before kill: exitcode=%d\n"
% child.returncode)
stop_early = True
break
time.sleep(1)
if not stop_early:
if child.poll() is not None:
print("WARNING: db_stress ended before kill: exitcode=%d\n"
% child.returncode)
else:
child.kill()
print("KILLED %d\n" % child.pid)
time.sleep(1) # time to stabilize after a kill
while True:
line = child.stderr.readline().strip()
if line != '' and not line.startswith('WARNING'):
run_had_errors = True
print('stderr has error message:')
print('***' + line + '***')
else:
break
if run_had_errors:
sys.exit(2)
time.sleep(1) # time to stabilize before the next run
# we need to clean up after ourselves -- only do this on test success
shutil.rmtree(dbname, True)
# This python script runs db_stress multiple times. Some runs with
# kill_random_test that causes rocksdb to crash at various points in code.
def whitebox_crash_main(args, unknown_args):
cmd_params = gen_cmd_params(args)
dbname = get_dbname('whitebox')
cur_time = time.time()
exit_time = cur_time + cmd_params['duration']
half_time = cur_time + cmd_params['duration'] / 2
print("Running whitebox-crash-test with \n"
+ "total-duration=" + str(cmd_params['duration']) + "\n")
total_check_mode = 4
check_mode = 0
kill_random_test = cmd_params['random_kill_odd']
kill_mode = 0
while time.time() < exit_time:
if check_mode == 0:
additional_opts = {
# use large ops per thread since we will kill it anyway
"ops_per_thread": 100 * cmd_params['ops_per_thread'],
}
# run with kill_random_test, with three modes.
# Mode 0 covers all kill points. Mode 1 covers fewer kill points but
# increases the chance of triggering them. Mode 2 covers even less
# frequent kill points and further increases that chance.
if kill_mode == 0:
additional_opts.update({
"kill_random_test": kill_random_test,
})
elif kill_mode == 1:
additional_opts.update({
"kill_random_test": (kill_random_test / 10 + 1),
"kill_prefix_blacklist": "WritableFileWriter::Append,"
+ "WritableFileWriter::WriteBuffered",
})
elif kill_mode == 2:
# TODO: May need to adjust random odds if kill_random_test
# is too small.
additional_opts.update({
"kill_random_test": (kill_random_test / 5000 + 1),
"kill_prefix_blacklist": "WritableFileWriter::Append,"
"WritableFileWriter::WriteBuffered,"
"PosixMmapFile::Allocate,WritableFileWriter::Flush",
})
# Run kill mode 0, 1 and 2 by turn.
kill_mode = (kill_mode + 1) % 3
elif check_mode == 1:
# normal run with universal compaction mode
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'],
"compaction_style": 1,
}
elif check_mode == 2:
# normal run with FIFO compaction mode
# ops_per_thread is divided by 5 because FIFO compaction
# style is quite a bit slower on reads with a lot of files
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'] / 5,
"compaction_style": 2,
}
else:
# normal run
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'],
}
cmd = gen_cmd(dict(cmd_params.items() + additional_opts.items()
+ {'db': dbname}.items()), unknown_args)
print "Running:" + ' '.join(cmd) + "\n" # noqa: E999 T25377293 Grandfathered in
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdoutdata, stderrdata = popen.communicate()
retncode = popen.returncode
msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
check_mode, additional_opts['kill_random_test'], retncode))
print msg
print stdoutdata
expected = False
if additional_opts['kill_random_test'] is None and (retncode == 0):
# we expect zero retncode if no kill option
expected = True
elif additional_opts['kill_random_test'] is not None and retncode < 0:
# we expect negative retncode if kill option was given
expected = True
if not expected:
print "TEST FAILED. See kill option and exit code above!!!\n"
sys.exit(1)
stdoutdata = stdoutdata.lower()
errorcount = (stdoutdata.count('error') -
stdoutdata.count('got errors 0 times'))
print "#times error occurred in output is " + str(errorcount) + "\n"
if (errorcount > 0):
print "TEST FAILED. Output has 'error'!!!\n"
sys.exit(2)
if (stdoutdata.find('fail') >= 0):
print "TEST FAILED. Output has 'fail'!!!\n"
sys.exit(2)
# First half of the duration, keep doing kill test. For the next half,
# try different modes.
if time.time() > half_time:
# we need to clean up after ourselves -- only do this on test
# success
shutil.rmtree(dbname, True)
cmd_params.pop('expected_values_path', None)
check_mode = (check_mode + 1) % total_check_mode
time.sleep(1) # time to stabilize after a kill
def main():
parser = argparse.ArgumentParser(description="This script runs and kills \
db_stress multiple times")
parser.add_argument("test_type", choices=["blackbox", "whitebox"])
parser.add_argument("--simple", action="store_true")
all_params = dict(default_params.items()
+ blackbox_default_params.items()
+ whitebox_default_params.items()
+ simple_default_params.items()
+ blackbox_simple_default_params.items()
+ whitebox_simple_default_params.items())
for k, v in all_params.items():
parser.add_argument("--" + k, type=type(v() if callable(v) else v))
# unknown_args are passed directly to db_stress
args, unknown_args = parser.parse_known_args()
if args.test_type == 'blackbox':
blackbox_crash_main(args, unknown_args)
if args.test_type == 'whitebox':
whitebox_crash_main(args, unknown_args)
if __name__ == '__main__':
main()
|
py | b40a3400aff3785cdd1974f2a7c9a428a68dc2c0 | # STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import logging
import numpy as np
from .maamp import _maamp, _get_first_maamp_profile
from .mstump import _get_multi_QT, _preprocess_include
from . import core
logger = logging.getLogger(__name__)
def maamped(dask_client, T, m, include=None, discords=False):
"""
Compute the multi-dimensional non-normalized (i.e., without z-normalization) matrix
profile with a distributed dask cluster
This is a highly distributed implementation around the Numba JIT-compiled
parallelized `_maamp` function which computes the multi-dimensional matrix
profile according to STOMP. Note that only self-joins are supported.
Parameters
----------
dask_client : client
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
T : ndarray
The time series or sequence for which to compute the multi-dimensional
matrix profile. Each row in `T` represents data from a different
dimension while each column in `T` represents data from the same
dimension.
m : int
Window size
include : list, ndarray, default None
A list of (zero-based) indices corresponding to the dimensions in `T` that
must be included in the constrained multidimensional motif search.
For more information, see Section IV D in:
`DOI: 10.1109/ICDM.2017.66 \
<https://www.cs.ucr.edu/~eamonn/Motif_Discovery_ICDM.pdf>`__
discords : bool, default False
When set to `True`, this reverses the distance matrix which results in a
multi-dimensional matrix profile that favors larger matrix profile values
(i.e., discords) rather than smaller values (i.e., motifs). Note that indices
in `include` are still maintained and respected.
Returns
-------
P : ndarray
The multi-dimensional matrix profile. Each row of the array corresponds
to each matrix profile for a given dimension (i.e., the first row is
the 1-D matrix profile and the second row is the 2-D matrix profile).
I : ndarray
The multi-dimensional matrix profile index where each row of the array
corresponds to each matrix profile index for a given dimension.
Notes
-----
`DOI: 10.1109/ICDM.2017.66 \
<https://www.cs.ucr.edu/~eamonn/Motif_Discovery_ICDM.pdf>`__
See mSTAMP Algorithm
"""
T_A = T
T_B = T_A
T_A, T_A_subseq_isfinite = core.preprocess_non_normalized(T_A, m)
T_B, T_B_subseq_isfinite = core.preprocess_non_normalized(T_B, m)
T_A_subseq_squared = np.sum(core.rolling_window(T_A * T_A, m), axis=2)
T_B_subseq_squared = np.sum(core.rolling_window(T_B * T_B, m), axis=2)
if T_A.ndim <= 1: # pragma: no cover
err = f"T is {T_A.ndim}-dimensional and must be at least 1-dimensional"
raise ValueError(f"{err}")
core.check_window_size(m, max_size=min(T_A.shape[1], T_B.shape[1]))
if include is not None:
include = _preprocess_include(include)
d, n = T_B.shape
k = n - m + 1
excl_zone = int(np.ceil(m / 4)) # See Definition 3 and Figure 3
P = np.empty((d, k), dtype="float64")
I = np.empty((d, k), dtype="int64")
hosts = list(dask_client.ncores().keys())
nworkers = len(hosts)
step = 1 + k // nworkers
for i, start in enumerate(range(0, k, step)):
P[:, start], I[:, start] = _get_first_maamp_profile(
start,
T_A,
T_B,
m,
excl_zone,
T_B_subseq_isfinite,
include,
discords,
)
# Scatter data to Dask cluster
T_A_future = dask_client.scatter(T_A, broadcast=True, hash=False)
T_A_subseq_isfinite_future = dask_client.scatter(
T_A_subseq_isfinite, broadcast=True, hash=False
)
T_B_subseq_isfinite_future = dask_client.scatter(
T_B_subseq_isfinite, broadcast=True, hash=False
)
T_A_subseq_squared_future = dask_client.scatter(
T_A_subseq_squared, broadcast=True, hash=False
)
T_B_subseq_squared_future = dask_client.scatter(
T_B_subseq_squared, broadcast=True, hash=False
)
QT_futures = []
QT_first_futures = []
for i, start in enumerate(range(0, k, step)):
QT, QT_first = _get_multi_QT(start, T_A, m)
QT_future = dask_client.scatter(QT, workers=[hosts[i]], hash=False)
QT_first_future = dask_client.scatter(QT_first, workers=[hosts[i]], hash=False)
QT_futures.append(QT_future)
QT_first_futures.append(QT_first_future)
futures = []
for i, start in enumerate(range(0, k, step)):
stop = min(k, start + step)
futures.append(
dask_client.submit(
_maamp,
T_A_future,
m,
stop,
excl_zone,
T_A_subseq_isfinite_future,
T_B_subseq_isfinite_future,
T_A_subseq_squared_future,
T_B_subseq_squared_future,
QT_futures[i],
QT_first_futures[i],
k,
start + 1,
include,
discords,
)
)
results = dask_client.gather(futures)
for i, start in enumerate(range(0, k, step)):
stop = min(k, start + step)
P[:, start + 1 : stop], I[:, start + 1 : stop] = results[i]
# Delete data from Dask cluster
dask_client.cancel(T_A_future)
dask_client.cancel(T_A_subseq_isfinite_future)
dask_client.cancel(T_B_subseq_isfinite_future)
dask_client.cancel(T_A_subseq_squared_future)
dask_client.cancel(T_B_subseq_squared_future)
for QT_future in QT_futures:
dask_client.cancel(QT_future)
for QT_first_future in QT_first_futures:
dask_client.cancel(QT_first_future)
for future in futures:
dask_client.cancel(future)
return P, I
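# Illustrative sketch only (not part of the function above): calling maamped with a
# local dask cluster. The cluster size and the random input data are assumptions
# chosen purely for demonstration.
#
#     import numpy as np
#     from dask.distributed import Client, LocalCluster
#
#     if __name__ == "__main__":
#         with Client(LocalCluster(n_workers=2)) as dask_client:
#             T = np.random.rand(3, 1000)  # 3 dimensions, 1000 data points
#             P, I = maamped(dask_client, T, m=50)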
|
py | b40a3481e9b8ae3922d852b87a123964bf3625f0 | from datetime import timedelta
import random
class RetrySettings(object):
__default_number_of_retries = 0
__maximum_allowed_number_of_retries = 5
__minimum_retry_time = timedelta(seconds=1)
__maximum_retry_time = timedelta(seconds=10)
def __init__(self, maximum_retries=None):
if maximum_retries:
if maximum_retries < 0:
raise AttributeError("maximumNumberOfRetries must be greater than 0")
if maximum_retries > self.__maximum_allowed_number_of_retries:
raise AttributeError("The maximum number of allowed retries is ",
self.__maximum_allowed_number_of_retries)
self.__maximum_number_of_retries = maximum_retries
else:
self.__maximum_number_of_retries = self.__default_number_of_retries
@property
def maximum_number_of_retries(self):
return self.__maximum_number_of_retries
def get_next_wait_interval(self, number_of_attempts):
interval = int(min(
((self.__minimum_retry_time.seconds * 1000) + self.get_retry_delta(number_of_attempts)),
(self.__maximum_retry_time.seconds * 1000)
))
return timedelta(milliseconds=interval)
@staticmethod
def get_retry_delta(number_of_attempts):
minimum = int((timedelta(seconds=1).seconds * 1000) * 0.8)
maximum = int((timedelta(seconds=1).seconds * 1000) * 1.2)
return int((pow(2.0, number_of_attempts) - 1.0) * random.randint(minimum, maximum))
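# Illustrative usage sketch (the retried operation and exception type below are
# assumptions for demonstration only):
#
#     import time
#     settings = RetrySettings(maximum_retries=3)
#     for attempt in range(1, settings.maximum_number_of_retries + 1):
#         try:
#             do_request()  # hypothetical operation being retried
#             break
#         except IOError:
#             time.sleep(settings.get_next_wait_interval(attempt).total_seconds())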
|
py | b40a364800674467c8ebfba0e6e92a3644bec68e | config = {
'Systen description': 'Sample system to demonstrate project creation',
}
|
py | b40a36be128f5b0b927157b788b8ec6341317759 | """
Copyright (c) 2011 Tencent Inc.
All rights reserved.
Author: Michaelpeng <[email protected]>
Date: October 20, 2011
This is the test module for lex_yacc_library target.
"""
import os
import sys
sys.path.append('..')
import unittest
import subprocess
import blade.blade
import blade.configparse
from blade.blade import Blade
from blade.configparse import BladeConfig
from blade_namespace import Namespace
from html_test_runner import HTMLTestRunner
class TestLexYacc(unittest.TestCase):
"""Test lex_yacc """
def setUp(self):
"""setup method. """
self.command = 'build'
self.targets = ['test_lex_yacc/...']
self.target_path = 'test_lex_yacc'
self.cur_dir = os.getcwd()
os.chdir('./testdata')
self.blade_path = '../../blade'
self.working_dir = '.'
self.current_building_path = 'build64_release'
self.current_source_dir = '.'
self.options = Namespace({'m' : '64',
'profile' : 'release',
'generate_dynamic' : True,
'verbose' : True
})
self.direct_targets = []
self.all_command_targets = []
self.related_targets = {}
# Init global configuration manager
blade.configparse.blade_config = BladeConfig(self.current_source_dir)
blade.configparse.blade_config.parse()
blade.blade.blade = Blade(self.targets,
self.blade_path,
self.working_dir,
self.current_building_path,
self.current_source_dir,
self.options,
blade_command=self.command)
self.blade = blade.blade.blade
(self.direct_targets,
self.all_command_targets) = self.blade.load_targets()
def tearDown(self):
"""tear down method. """
os.chdir(self.cur_dir)
def testLoadBuildsNotNone(self):
"""Test direct targets and all command targets are not none. """
self.assertEqual(self.direct_targets, [])
self.assertTrue(self.all_command_targets)
def testGenerateRules(self):
"""Test that rules are generated correctly. """
self.all_targets = self.blade.analyze_targets()
self.rules_buf = self.blade.generate_build_rules()
cc_library_lower = (self.target_path, 'lowercase')
lex_yacc_library = (self.target_path, 'parser')
self.command_file = 'cmds.tmp'
self.assertTrue(cc_library_lower in self.all_targets.keys())
self.assertTrue(lex_yacc_library in self.all_targets.keys())
p = subprocess.Popen("scons --dry-run > %s" % self.command_file,
stdout=subprocess.PIPE,
shell=True)
try:
p.wait()
self.assertEqual(p.returncode, 0)
com_lower_line = ''
com_bison_line = ''
com_flex_line = ''
com_ll_static_line = ''
com_ll_so_line = ''
com_yy_static_line = ''
com_yy_so_line = ''
lex_yacc_depends_libs = ''
for line in open(self.command_file):
if 'plowercase.cpp.o -c' in line:
com_lower_line = line
if 'bison -d -o' in line:
com_bison_line = line
if 'flex -R -t' in line:
com_flex_line = line
if 'line_parser.ll.o -c' in line:
com_ll_static_line = line
if 'line_parser.yy.o -c' in line:
com_yy_static_line = line
if 'line_parser.ll.os -c' in line:
com_ll_so_line = line
if 'line_parser.yy.os -c' in line:
com_yy_so_line = line
if 'libparser.so' in line:
lex_yacc_depends_libs = line
except:
print sys.exc_info()
self.fail("Failed while dry running in test case")
self.assertTrue('-fPIC -Wall -Wextra' in com_lower_line)
self.assertTrue('-Wframe-larger-than=69632' in com_lower_line)
self.assertTrue('-Werror=overloaded-virtual' in com_lower_line)
self.assertTrue('line_parser.yy.cc' in com_bison_line)
self.assertTrue('line_parser.ll.cc' in com_flex_line)
self.assertTrue('-Woverloaded-virtual' in com_ll_static_line)
self.assertTrue('-Werror=overloaded-virtual' not in com_ll_static_line)
self.assertTrue('-fPIC -Wall -Wextra' in com_ll_so_line)
self.assertTrue('-Wframe-larger-than=69632' in com_ll_so_line)
self.assertTrue('-Werror=overloaded-virtual' not in com_ll_so_line)
self.assertTrue('-Woverloaded-virtual' in com_yy_static_line)
self.assertTrue('-Werror=overloaded-virtual' not in com_yy_static_line)
self.assertTrue('-fPIC -Wall -Wextra' in com_yy_so_line)
self.assertTrue('-Wframe-larger-than=69632' in com_yy_so_line)
self.assertTrue('-Werror=overloaded-virtual' not in com_yy_so_line)
self.assertTrue('liblowercase.so' in lex_yacc_depends_libs)
self.assertTrue('line_parser.ll.os' in lex_yacc_depends_libs)
self.assertTrue('line_parser.yy.os' in lex_yacc_depends_libs)
os.remove('./SConstruct')
os.remove(self.command_file)
if __name__ == "__main__":
suite_test = unittest.TestSuite()
suite_test.addTests(
[unittest.defaultTestLoader.loadTestsFromTestCase(TestLexYacc)])
runner = unittest.TextTestRunner()
runner.run(suite_test)
|
py | b40a3958be384b42f41f64e6a530161b8161469d | import itertools
from typing import Iterable, List, Set, Tuple
from tool.runners.python import SubmissionPy
def parse(s: str) -> List[List[int]]:
res = []
for line in s.splitlines():
if stripped_line := line.strip():
res.append([int(d) for d in stripped_line])
return res
def neighbors(r: int, c: int, nrows: int, ncols: int) -> Iterable[Tuple[int, int]]:
left_space = c > 0
right_space = c + 1 < ncols
if r > 0:
if left_space:
yield r - 1, c - 1
yield r - 1, c
if right_space:
yield r - 1, c + 1
if left_space:
yield r, c - 1
if right_space:
yield r, c + 1
if r + 1 < nrows:
if left_space:
yield r + 1, c - 1
yield r + 1, c
if right_space:
yield r + 1, c + 1
def flash(board: List[List[int]], r: int, c: int) -> Set[Tuple[int, int]]:
res = set()
if board[r][c] < 9:
return res
for nr, nc in neighbors(r, c, len(board), len(board[0])):
if board[nr][nc] > 9:
continue
board[nr][nc] += 1
if board[nr][nc] > 9:
res.add((nr, nc))
return res
def reset(board: List[List[int]]) -> Tuple[bool, int]:
synced = True
delta = 10
for r, row in enumerate(board):
for c, v in enumerate(row):
if v > 9:
board[r][c] = 0
else:
synced = False
delta = min(delta, 10 - v)
return synced, delta
class SkaschSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
# Your code goes here
board = parse(s)
nrows = len(board)
ncols = len(board[0])
delta = min(min(10 - v for v in row) for row in board)
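# `delta` is the number of whole steps that can be batched before any octopus can
# reach 10; no flash is possible during the skipped steps, so they collapse into a
# single uniform addition inside the loop below.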
idx = 0
while True:
idx += delta
flashes = []
for r, c in itertools.product(range(nrows), range(ncols)):
board[r][c] += delta
if board[r][c] > 9:
flashes.append((r, c))
while flashes:
flashes.extend(flash(board, *flashes.pop()))
synced, delta = reset(board)
if synced:
break
return idx
def test_skasch():
"""
Run `python -m pytest ./day-11/part-2/skasch.py` to test the submission.
"""
assert (
SkaschSubmission().run(
"""
5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
""".strip()
)
== 195
)
|
py | b40a39a2a92d072e4df6e510a9428df92f904d1e | # -*-coding:utf-8-*-
import argparse
import logging
import shutil
import subprocess
import sys
from lj.judger import do_compile
logger = logging.getLogger()
def lj_compile_and_run(args):
compile_result = do_compile(args.src)
if compile_result.code == 0:
run_with_console(compile_result.runnable)
shutil.rmtree(compile_result.temp_dir)
print("Removing " + compile_result.temp_dir)
def run_with_console(command):
print("Running %s" % command)
print("-" * 20)
# If the file name contains spaces, the user must wrap it in quotes
proc = subprocess.Popen(command.split(), shell=False, stdin=sys.stdin, stdout=sys.stdout)
try:
while proc.poll() is None:
pass
except KeyboardInterrupt:
pass
print()
print("-" * 20)
print("Process Exit Code: %s" % (str(proc.returncode)))
def main():
parser = argparse.ArgumentParser(description="Local Judge Runner")
parser.add_argument("src", help="source file")
args = parser.parse_args()
lj_compile_and_run(args)
if __name__ == "__main__":
main()
|
py | b40a3ab6ec40c0a63860b2ab7bda2043231f8bf9 | # Copyright 2021 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper of the Question Answering models on HuggingFace platform (context
understanding)
"""
import importlib
from typing import Dict, Set
from transformers import pipeline
from ft.onto.base_ontology import Phrase
from forte.common import Resources
from forte.common.configuration import Config
from forte.data.data_pack import DataPack
from forte.processors.base import PackProcessor
__all__ = [
"QuestionAnsweringSingle",
]
class QuestionAnsweringSingle(PackProcessor):
r"""Wrapper of the models on HuggingFace platform with pipeline tag of
`question-answering` (reading comprehension).
https://huggingface.co/models?pipeline_tag=question-answering
This wrapper accepts any model name on the HuggingFace platform that carries the
pipeline tag `question-answering`. It runs the model over the context given by the
user-specified entry type in the input pack, and the predicted answer span is
annotated as a `Phrase` in the output pack. The question is supplied through the
config.
"""
def __init__(self):
super().__init__()
self.extractor = None
def set_up(self):
device_num = self.configs["cuda_devices"]
self.extractor = pipeline(
"question-answering",
model=self.configs.model_name,
tokenizer=self.configs.model_name,
framework="pt",
device=device_num,
)
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
self.set_up()
def _process(self, input_pack: DataPack):
path_str, module_str = self.configs.entry_type.rsplit(".", 1)
mod = importlib.import_module(path_str)
entry = getattr(mod, module_str)
for entry_specified in input_pack.get(entry_type=entry):
result = self.extractor(
context=entry_specified.text,
question=self.configs.question,
max_answer_len=self.configs.max_answer_len,
handle_impossible_answer=self.configs.handle_impossible_answer,
)
start = result["start"]
end = result["end"]
Phrase(pack=input_pack, begin=start, end=end)
@classmethod
def default_configs(cls):
r"""This defines a basic config structure for `QuestionAnsweringSingle`.
Following are the keys for this dictionary:
- `entry_type`: defines which entry type in the input pack to make
prediction on. The default makes prediction on each `Document`
in the input pack.
- `model_name`: language model, default is
`"ktrapeznikov/biobert_v1.1_pubmed_squad_v2"`.
The wrapper supports Hugging Face models with pipeline tag of
`question-answering`.
- `question`: One question to retrieve an answer from the input pack
context.
- `max_answer_len`: The maximum length of predicted answers (e.g.,
only answers with a shorter length are considered).
- `cuda_devices`: Device ordinal for CPU/GPU support. Setting
this to -1 will run the model on the CPU; a non-negative value
will run it on the associated CUDA device id.
- `handle_impossible_answer`: Whether or not we accept
impossible as an answer.
Returns: A dictionary with the default config for this processor.
"""
return {
"entry_type": "ft.onto.base_ontology.Document",
"model_name": "ktrapeznikov/biobert_v1.1_pubmed_squad_v2",
"question": "Where do I live",
"max_answer_len": 15,
"cuda_devices": -1,
"handle_impossible_answer": False,
}
def expected_types_and_attributes(self):
r"""Method to add user specified expected type which
would be checked before running the processor if
the pipeline is initialized with
`enforce_consistency=True` or
:meth:`~forte.pipeline.Pipeline.enforce_consistency` was enabled for
the pipeline.
"""
return {self.configs["entry_type"]: set()}
def record(self, record_meta: Dict[str, Set[str]]):
r"""Method to add output type record of `QuestionAnsweringSingle` which
is `"ft.onto.base_ontology.Phrase"`
to :attr:`forte.data.data_pack.Meta.record`.
Args:
record_meta: the field in the datapack for type record that need to
fill in for consistency checking.
"""
if "ft.onto.base_ontology.Phrase" not in record_meta.keys():
record_meta["ft.onto.base_ontology.Phrase"] = set()
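# Illustrative sketch only: wiring this processor into a forte pipeline. The reader
# class, input text, and question below are assumptions for demonstration, and the
# exact import paths may differ between forte versions.
#
#     from forte.pipeline import Pipeline
#     from forte.data.readers import StringReader
#
#     pl = Pipeline[DataPack]()
#     pl.set_reader(StringReader())
#     pl.add(QuestionAnsweringSingle(), config={"question": "Who wrote the report?"})
#     pl.initialize()
#     pack = pl.process("The report was written by Alice in 2020.")
#     for phrase in pack.get(Phrase):
#         print(phrase.text)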
|
py | b40a3b5a41e96f2139c6b18fb787aef076390d3c | #! /usr/bin/env python3
'''A library of functions for our cool app'''
def add(a, b):
return a + b
def add1(a):
return a + 1
def sub1(a):
return a - 1
|
py | b40a3bb55ea3945bdc30abfff7b3f56810056555 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys #needed to press enter after the bot types the reply
from bs4 import BeautifulSoup #awesome tool for extracting useful data from websites
import time #to add delay
import re # regular expressions, much more direct than BeautifulSoup, allows to find patterns within the text (it's a bit like a language on its own)
def GetSource(): # used to refresh the content
global br # browser object
elem = br.find_element_by_xpath("//*") # get some kind of "root" element in order to get the actual source
src = elem.get_attribute("outerHTML")
return src
def ParseMessage(msg): #used to remove things like: <!-- react-text: 611 -->
toBeRemoved = re.findall(r'<.+?>', msg) # find all the text between < and >
for text in toBeRemoved:
msg = msg.replace(text, "") # remove each occurence of the text found between < and > so only the actual message is left
return msg
def GetComments():
comments = []
for comChunk in BeautifulSoup(GetSource(), "lxml").find_all("div", class_="comment"): # for each chunk of the code which covers 1 comment
name = comChunk.find("strong", class_="user-name").string #find username of the one who posted it
msg=[] # define new list
for addMsgChunk in comChunk.find_all("div", class_="message"): # each "comment" may consist of several messages if the same person posts a few messages in a row; this loop joins them all into one comment
msg.append(ParseMessage(repr(addMsgChunk.find("div", class_="markup")))) # appends messages into 1 comment
comments.append({"name":name, "msg":msg}) # appends comments and names into 1 full list
return comments
def GetCurrentMsg():
coms = GetComments()
return coms[-1]["name"], coms[-1]["msg"][-1]
def SendMsg(msg):
global br # br is a browser object
entry = br.find_element_by_xpath('//*[@id="app-mount"]/div/div[2]/div/div[2]/div/section/div[3]/div[2]/div[1]/form/div/div/textarea') # get the text input element
entry.send_keys(msg) # write characters into text input
entry.send_keys(Keys.RETURN) # press enter
br = webdriver.Chrome()
#br.get("https://discord.gg/VxGFHY4")
br.get("https://discordapp.com/channels/265199259291222016/265199259291222016")
raw_input("Press enter when discord is ready...")
lastName=""
lastMsg=""
while True:
time.sleep(0.2)
try:
name, msg = GetCurrentMsg()
if msg.startswith("@Nicolas Cage"):
if name != lastName:
if msg.endswith("help"):
reply = "```Nicolas Cage V0.1 Documentation\nUSAGE: @Nicolas Cage\nCopyright: Nicolas Cage\nAuthor: Nicolas Cage\nSpecial thanks to Nicolas for testing```"
else:
reply = "http://i2.kym-cdn.com/entries/icons/original/000/006/993/1817.jpg"
SendMsg(reply)
lastName = name
except Exception as e:
print "Error: " + repr(e)
''' #shows the latest message
cm = GetCurrentMsg(GetSource())
print cm["name"] + ": " + cm["msg"][len(cm["msg"])-1]
'''
''' saves source of the website to a file
soup = BeautifulSoup(source, "lxml")
soup.find("div", class_="messages-wrapper")
with open("src.html", "wb") as f:
f.write(BeautifulSoup(source, "lxml").prettify().encode("utf-8")) # prettify() function adds identation to the text so it looks nice and clean in the output
'''
|
py | b40a3bc9819ebb36379836d84a91359dd8116ef5 | from xml.sax.saxutils import escape
class SchemaNode(object):
"""Generic node in the schema.
Instance variables:
* `interleave` - signal whether children should be interleaved.
* `occur` - 0=optional, 1=implicit, 2=mandatory, 3=presence
Class variables:
* `ser_format` - dictionary of methods returning string
serialization formats
"""
def element(cls, name, parent=None, interleave=None, occur=0):
"""Create an element node."""
node = cls("element", parent, interleave=interleave)
node.attr["name"] = name
node.occur = occur
return node
element = classmethod(element)
def leaf_list(cls, name, parent=None, interleave=None):
"""Create list node for a leaf-list."""
node = cls("_list_", parent, interleave=interleave)
node.attr["name"] = name
node.keys = None
node.minEl = "0"
node.maxEl = None
return node
leaf_list = classmethod(leaf_list)
def list(cls, name, parent=None, interleave=None):
"""Create list node for a list."""
node = cls.leaf_list(name, parent, interleave=interleave)
node.keys = None
node.keymap = {}
node.occur = 3
return node
list = classmethod(list)
def choice(cls, parent=None, occur=0):
"""Create choice node."""
node = cls("choice", parent)
node.occur = occur
node.default_case = None
return node
choice = classmethod(choice)
def case(cls, parent=None):
"""Create case node."""
node = cls("case", parent)
node.occur = 0
return node
case = classmethod(case)
def define(cls, name, parent=None, interleave=False):
"""Create define node."""
node = cls("define", parent, interleave=interleave)
node.occur = 0
node.attr["name"] = name
return node
define = classmethod(define)
def __init__(self, name, parent=None, text="", interleave=None):
"""Initialize the object under `parent`.
"""
self.name = name
self.parent = parent
if parent is not None: parent.children.append(self)
self.text = text
self.adjust_interleave(interleave)
self.children = []
self.attr = {}
def adjust_interleave(self, interleave):
"""Inherit interleave status from parent if undefined."""
if interleave == None and self.parent:
self.interleave = self.parent.interleave
else:
self.interleave = interleave
def subnode(self, node):
"""Make `node` receiver's child."""
self.children.append(node)
node.parent = self
node.adjust_interleave(None)
def set_attr(self, key, value):
self.attr[key] = value
return self
def data_nodes_count(self):
"""Return the number of receiver's data subnodes."""
return len([ch for ch in self.children
if ch.name in ("element", "choice", "_list_", "ref")])
def start_tag(self, alt=None, empty=False):
"""Return XML start tag for the receiver."""
if alt:
name = alt
else:
name = self.name
result = "<" + name
for it in self.attr:
result += ' %s="%s"' % (it, escape(self.attr[it]))
if empty:
return result + "/>"
else:
return result + ">"
def end_tag(self, alt=None):
"""Return XML end tag for the receiver."""
if alt:
name = alt
else:
name = self.name
return "</" + name + ">"
def serialize(self, occur=None):
"""Return RELAX NG representation of the receiver and subtree.
"""
return (self.ser_format.get(self.name, SchemaNode._default_format)
(self, occur) % (escape(self.text) + ''.join
([ch.serialize() for ch in self.children])))
def _default_format(self, occur):
if self.text or self.children:
return self.start_tag() + "%s" + self.end_tag()
else:
return self.start_tag(empty=True) + "%s"
def _define_format(self, occur):
if hasattr(self, "default"):
self.attr["nma:default"] = self.default
return self.start_tag() + self._chorder() + self.end_tag()
def _element_format(self, occur):
if occur:
occ = occur
else:
occ = self.occur
if occ == 1:
if hasattr(self, "default"):
self.attr["nma:default"] = self.default
else:
self.attr["nma:implicit"] = "true"
fmt = self.start_tag() + self._chorder() + self.end_tag()
if (occ == 2 or self.parent.name == "choice"
or self.parent.name == "case" and self.data_nodes_count() == 1):
return fmt
else:
return "<optional>" + fmt + "</optional>"
def _chorder(self):
"""Add <interleave> if child order is arbitrary."""
if self.interleave and self.data_nodes_count() > 1:
return "<interleave>%s</interleave>"
return "%s"
def _list_format(self, occur):
if self.keys:
self.attr["nma:key"] = " ".join(self.keys)
keys = ''.join([self.keymap[k].serialize(occur=2)
for k in self.keys])
else:
keys = ""
if self.maxEl:
self.attr["nma:max-elements"] = self.maxEl
if int(self.minEl) == 0:
ord_ = "zeroOrMore"
else:
ord_ = "oneOrMore"
if int(self.minEl) > 1:
self.attr["nma:min-elements"] = self.minEl
return ("<" + ord_ + ">" + self.start_tag("element") + keys +
self._chorder() + self.end_tag("element") + "</" + ord_ + ">")
def _choice_format(self, occur):
fmt = self.start_tag() + "%s" + self.end_tag()
if self.occur < 2:
return "<optional>" + fmt + "</optional>"
else:
return fmt
def _case_format(self, occur):
if self.occur == 1:
self.attr["nma:implicit"] = "true"
if len(self.children) == 1 or not self.interleave:
return self.start_tag("group") + "%s" + self.end_tag("group")
else:
return (self.start_tag("interleave") + "%s" +
self.end_tag("interleave"))
ser_format = { "element": _element_format,
"_list_": _list_format,
"choice": _choice_format,
"case": _case_format,
"define": _define_format,
}
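# Illustrative usage sketch (not part of the original module; the node names here are
# made up): the classmethod constructors and serialize() above combine roughly like this:
#
#     d = SchemaNode.define("my-def")
#     SchemaNode("text", parent=d)
#     print(d.serialize())   # emits a <define name="my-def"> ... </define> pattern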
|
py | b40a3bdf4673df4e5cb63680dac60e9bb0d1918a | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Decoders for sequence-to-sequence models.
"""
import logging
from abc import ABC, abstractmethod
from typing import Callable, Dict, List, NamedTuple, Tuple, Union
from typing import Optional
import mxnet as mx
from sockeye.config import Config
from . import constants as C
from . import convolution
from . import encoder
from . import layers
from . import rnn
from . import rnn_attention
from . import transformer
from . import utils
logger = logging.getLogger(__name__)
DecoderConfig = Union['RecurrentDecoderConfig', transformer.TransformerConfig, 'ConvolutionalDecoderConfig']
def get_decoder(config: DecoderConfig) -> 'Decoder':
if isinstance(config, RecurrentDecoderConfig):
return RecurrentDecoder(config=config, prefix=C.RNN_DECODER_PREFIX)
elif isinstance(config, ConvolutionalDecoderConfig):
return ConvolutionalDecoder(config=config, prefix=C.CNN_DECODER_PREFIX)
elif isinstance(config, transformer.TransformerConfig):
return TransformerDecoder(config=config, prefix=C.TRANSFORMER_DECODER_PREFIX)
else:
raise ValueError("Unsupported decoder configuration")
class Decoder(ABC):
"""
Generic decoder interface.
A decoder needs to implement code to decode a target sequence known in advance (decode_sequence),
and code to decode a single word given its decoder state (decode_step).
The latter is typically used for inference graphs in beam search.
For the inference module to be able to keep track of decoder's states
a decoder provides methods to return initial states (init_states), state variables and their shapes.
"""
@abstractmethod
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
pass
@abstractmethod
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
pass
@abstractmethod
def reset(self):
"""
Reset decoder method. Used for inference.
"""
pass
@abstractmethod
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
pass
@abstractmethod
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
pass
@abstractmethod
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
        :param target_max_length: Current target sequence length.
:return: List of symbolic variables.
"""
pass
@abstractmethod
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
pass
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the decoder if such a restriction exists.
"""
return None
def get_num_heads(self) -> int:
return 1
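# Illustrative sketch (not part of the original code): at inference time a Decoder is
# conceptually driven step by step, threading the state list through decode_step().
# `decoder`, `embed`, `pick_next_word`, `BOS` and `max_output_length` are hypothetical
# stand-ins; the real loop is built symbolically by the inference module.
#
#     states = decoder.init_states(source_encoded, source_encoded_lengths, source_max_length)
#     target_embed_prev = embed(BOS)
#     for step in range(1, max_output_length + 1):
#         out, enc_dec_attention, dec_attention, states = decoder.decode_step(
#             step, target_embed_prev, source_max_length, *states)
#         target_embed_prev = embed(pick_next_word(out))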
class TransformerDecoder(Decoder):
"""
Transformer decoder as in Vaswani et al, 2017: Attention is all you need.
    In training, the scores for each position of the known target sequence are computed in parallel,
yielding most of the speedup.
At inference time, the decoder block is evaluated again and again over a maximum length input sequence that is
initially filled with zeros and grows during beam search with predicted tokens. Appropriate masking at every
time-step ensures correct self-attention scores and is updated with every step.
:param config: Transformer configuration.
:param prefix: Name prefix for symbols of this decoder.
"""
def __init__(self,
config: transformer.TransformerConfig,
prefix: str = C.TRANSFORMER_DECODER_PREFIX) -> None:
self.config = config
self.prefix = prefix
self.layers = [transformer.TransformerDecoderBlock(
config, prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
self.final_process = transformer.TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sfinal_process_" % prefix)
self.pos_embedding = encoder.get_positional_embedding(config.positional_embedding_type,
config.model_size,
max_seq_len=config.max_seq_len_target,
fixed_pos_embed_scale_up_input=True,
fixed_pos_embed_scale_down_positions=False,
prefix=C.TARGET_POSITIONAL_EMBEDDING_PREFIX)
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
# (batch_size * heads, max_length)
source_bias = transformer.get_variable_length_bias(lengths=source_encoded_lengths,
max_length=source_encoded_max_length,
num_heads=self.config.attention_heads,
fold_heads=True,
name="%ssource_bias" % self.prefix)
# (batch_size * heads, 1, max_length)
source_bias = mx.sym.expand_dims(source_bias, axis=1)
# (1, target_max_length, target_max_length)
target_bias = transformer.get_autoregressive_bias(target_embed_max_length, name="%starget_bias" % self.prefix)
# target: (batch_size, target_max_length, model_size)
target, _, target_max_length = self.pos_embedding.encode(target_embed, None, target_embed_max_length)
if self.config.dropout_prepost > 0.0:
target = mx.sym.Dropout(data=target, p=self.config.dropout_prepost)
for layer in self.layers:
target, _, _ = layer(target=target,
target_bias=target_bias,
source=source_encoded,
source_bias=source_bias)
target = self.final_process(data=target, prev=None)
return target
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
# for step > 1, states contains source_encoded, source_encoded_lengths, and a cache tensor
source_encoded, source_encoded_lengths = states[:2] # pylint: disable=unbalanced-tuple-unpacking
# symbolic indices of the previous word
indices = mx.sym.arange(start=step - 1, stop=step, step=1, name='indices')
# (batch_size, num_embed)
target_embed_prev = self.pos_embedding.encode_positions(indices, target_embed_prev)
# (batch_size, 1, num_embed)
target = mx.sym.expand_dims(target_embed_prev, axis=1)
# (batch_size * heads, max_length)
source_bias = transformer.get_variable_length_bias(lengths=source_encoded_lengths,
max_length=source_encoded_max_length,
num_heads=self.config.attention_heads,
fold_heads=True,
name="%ssource_bias" % self.prefix)
# (batch_size * heads, 1, max_length)
source_bias = mx.sym.expand_dims(source_bias, axis=1)
# auto-regressive bias for last position in sequence
# (1, target_max_length, target_max_length)
target_bias = transformer.get_autoregressive_bias(step, name="%sbias" % self.prefix)
target_bias = mx.sym.slice_axis(target_bias, axis=1, begin=-1, end=step)
# retrieve precomputed self-attention keys & values for each layer from states.
layer_caches = self._get_layer_caches_from_states(list(states))
cache = [] # type: List[mx.sym.Symbol]
for layer, layer_cache in zip(self.layers, layer_caches):
target, enc_dec_attention, dec_attention = layer(target=target,
target_bias=target_bias,
source=source_encoded,
source_bias=source_bias,
cache=layer_cache)
# store updated keys and values in the cache.
# (layer.__call__() has the side-effect of updating contents of layer_cache)
cache += [layer_cache['k'], layer_cache['v']]
cache = mx.sym.concat(*cache, dim=1, name='new_cache')
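        # Concatenated cache for the next step; state_shapes() declares this tensor as
        # (batch_size, step * len(self.layers) * 2, model_size).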
# (batch_size, 1, model_size)
target = self.final_process(data=target, prev=None)
# (batch_size, model_size)
target = mx.sym.reshape(target, shape=(-3, -1))
# TODO(fhieber): no attention probs for now
# attention_probs = mx.sym.sum(mx.sym.zeros_like(source_encoded), axis=2, keepdims=False)
# (batch_size, heads, source_seq_length)
enc_dec_attention = enc_dec_attention.reshape((0, -3, -1))
# (batch_size, heads, target_seq_length)
dec_attention = dec_attention.reshape((0, -3, -1))
new_states = [source_encoded, source_encoded_lengths, cache]
return target, enc_dec_attention, dec_attention, new_states
def get_num_heads(self) -> int:
return self.config.attention_heads
def _get_layer_caches_from_states(self, states: List[mx.sym.Symbol]) -> List[Dict[str, Optional[mx.sym.Symbol]]]:
"""
For decoder time steps > 1 there will be a cache tensor available that contains
previously computed key & value tensors for each transformer layer.
The cache tensor passed in is concatenated along the time-axis for efficiency.
:param states: List of states passed to decode_step().
:return: List of layer cache dictionaries.
"""
cache = None
if len(states) == 3:
cache = states[2]
# len(self.layers) * 2 cache items
cache = mx.sym.split(cache, num_outputs=len(self.layers) * 2, axis=1, squeeze_axis=False)
if not cache: # first decoder step
return [{'k': None, 'v': None} for _ in range(len(self.layers))]
else:
layer_caches = [] # type: List[Dict[str, Optional[mx.sym.Symbol]]]
for i in range(len(self.layers)):
layer_caches.append({'k': cache[2 * i + 0], 'v': cache[2 * i + 1]})
return layer_caches
def reset(self):
pass
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
return self.config.model_size
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
return [source_encoded, source_encoded_lengths]
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
:param target_max_length: Current target sequence length.
:return: List of symbolic variables.
"""
variables = [mx.sym.Variable(C.SOURCE_ENCODED_NAME),
mx.sym.Variable(C.SOURCE_LENGTH_NAME)]
if target_max_length > 1: # no cache for initial decoder step
variables.append(mx.sym.Variable('cache'))
return variables
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
shapes = [mx.io.DataDesc(C.SOURCE_ENCODED_NAME,
(batch_size, source_encoded_max_length, source_encoded_depth),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_LENGTH_NAME, (batch_size,), layout="N")]
if target_max_length > 1: # no cache for initial decoder step
# the cache tensor passed in and out of the decoder step module contains
# all cache tensors concatenated along the time axis
            # (as all inputs to the module need to be of the same batch size).
shapes.append(mx.io.DataDesc(name='cache',
shape=(batch_size,
(target_max_length - 1) * len(self.layers) * 2,
self.config.model_size),
layout=C.BATCH_MAJOR))
return shapes
def get_max_seq_len(self) -> Optional[int]:
# The positional embeddings potentially pose a limit on the maximum length at inference time.
return self.pos_embedding.get_max_seq_len()
RecurrentDecoderState = NamedTuple('RecurrentDecoderState', [
('hidden', mx.sym.Symbol),
('layer_states', List[mx.sym.Symbol]),
])
"""
RecurrentDecoder state.
:param hidden: Hidden state after attention mechanism. Shape: (batch_size, num_hidden).
:param layer_states: Hidden states for RNN layers of RecurrentDecoder. Shape: List[(batch_size, rnn_num_hidden)]
"""
class RecurrentDecoderConfig(Config):
"""
Recurrent decoder configuration.
:param max_seq_len_source: Maximum source sequence length
:param rnn_config: RNN configuration.
:param attention_config: Attention configuration.
:param hidden_dropout: Dropout probability on next decoder hidden state.
:param state_init: Type of RNN decoder state initialization: zero, last, average.
:param context_gating: Whether to use context gating.
:param layer_normalization: Apply layer normalization.
:param attention_in_upper_layers: Pass the attention value to all layers in the decoder.
"""
def __init__(self,
max_seq_len_source: int,
rnn_config: rnn.RNNConfig,
attention_config: rnn_attention.AttentionConfig,
hidden_dropout: float = .0, # TODO: move this dropout functionality to OutputLayer
state_init: str = C.RNN_DEC_INIT_LAST,
context_gating: bool = False,
layer_normalization: bool = False,
attention_in_upper_layers: bool = False) -> None:
super().__init__()
self.max_seq_len_source = max_seq_len_source
self.rnn_config = rnn_config
self.attention_config = attention_config
self.hidden_dropout = hidden_dropout
self.state_init = state_init
self.context_gating = context_gating
self.layer_normalization = layer_normalization
self.attention_in_upper_layers = attention_in_upper_layers
class RecurrentDecoder(Decoder):
"""
RNN Decoder with attention.
The architecture is based on Luong et al, 2015: Effective Approaches to Attention-based Neural Machine Translation.
:param config: Configuration for recurrent decoder.
:param prefix: Decoder symbol prefix.
"""
def __init__(self,
config: RecurrentDecoderConfig,
prefix: str = C.RNN_DECODER_PREFIX) -> None:
# TODO: implement variant without input feeding
self.config = config
self.rnn_config = config.rnn_config
self.attention = rnn_attention.get_attention(config.attention_config, config.max_seq_len_source)
self.prefix = prefix
self.num_hidden = self.rnn_config.num_hidden
if self.config.context_gating:
utils.check_condition(not self.config.attention_in_upper_layers,
"Context gating is not supported with attention in upper layers.")
self.gate_w = mx.sym.Variable("%sgate_weight" % prefix)
self.gate_b = mx.sym.Variable("%sgate_bias" % prefix)
self.mapped_rnn_output_w = mx.sym.Variable("%smapped_rnn_output_weight" % prefix)
self.mapped_rnn_output_b = mx.sym.Variable("%smapped_rnn_output_bias" % prefix)
self.mapped_context_w = mx.sym.Variable("%smapped_context_weight" % prefix)
self.mapped_context_b = mx.sym.Variable("%smapped_context_bias" % prefix)
if self.rnn_config.residual:
utils.check_condition(self.config.rnn_config.first_residual_layer >= 2,
"Residual connections on the first decoder layer are not supported as input and "
"output dimensions do not match.")
# Stacked RNN
if self.rnn_config.num_layers == 1 or not self.config.attention_in_upper_layers:
self.rnn_pre_attention = rnn.get_stacked_rnn(self.rnn_config, self.prefix, parallel_inputs=False)
self.rnn_post_attention = None
else:
self.rnn_pre_attention = rnn.get_stacked_rnn(self.rnn_config, self.prefix, parallel_inputs=False,
layers=[0])
self.rnn_post_attention = rnn.get_stacked_rnn(self.rnn_config, self.prefix, parallel_inputs=True,
layers=range(1, self.rnn_config.num_layers))
self.rnn_pre_attention_n_states = len(self.rnn_pre_attention.state_shape)
if self.config.state_init != C.RNN_DEC_INIT_ZERO:
self._create_state_init_parameters()
# Hidden state parameters
self.hidden_w = mx.sym.Variable("%shidden_weight" % prefix)
self.hidden_b = mx.sym.Variable("%shidden_bias" % prefix)
self.hidden_norm = layers.LayerNormalization(self.num_hidden,
prefix="%shidden_norm" % prefix) \
if self.config.layer_normalization else None
def _create_state_init_parameters(self):
"""
Creates parameters for encoder last state transformation into decoder layer initial states.
"""
self.init_ws, self.init_bs, self.init_norms = [], [], []
# shallow copy of the state shapes:
state_shapes = list(self.rnn_pre_attention.state_shape)
if self.rnn_post_attention:
state_shapes += self.rnn_post_attention.state_shape
for state_idx, (_, init_num_hidden) in enumerate(state_shapes):
self.init_ws.append(mx.sym.Variable("%senc2decinit_%d_weight" % (self.prefix, state_idx)))
self.init_bs.append(mx.sym.Variable("%senc2decinit_%d_bias" % (self.prefix, state_idx)))
if self.config.layer_normalization:
self.init_norms.append(layers.LayerNormalization(num_hidden=init_num_hidden,
prefix="%senc2decinit_%d_norm" % (
self.prefix, state_idx)))
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
# target_embed: target_seq_len * (batch_size, num_target_embed)
target_embed = mx.sym.split(data=target_embed, num_outputs=target_embed_max_length, axis=1, squeeze_axis=True)
# get recurrent attention function conditioned on source
attention_func = self.attention.on(source_encoded, source_encoded_lengths,
source_encoded_max_length)
attention_state = self.attention.get_initial_state(source_encoded_lengths, source_encoded_max_length)
# initialize decoder states
# hidden: (batch_size, rnn_num_hidden)
# layer_states: List[(batch_size, state_num_hidden]
state = self.get_initial_state(source_encoded, source_encoded_lengths)
# hidden_all: target_seq_len * (batch_size, 1, rnn_num_hidden)
hidden_all = []
# TODO: possible alternative: feed back the context vector instead of the hidden (see lamtram)
self.reset()
for seq_idx in range(target_embed_max_length):
# hidden: (batch_size, rnn_num_hidden)
state, attention_state = self._step(target_embed[seq_idx],
state,
attention_func,
attention_state,
seq_idx)
# hidden_expanded: (batch_size, 1, rnn_num_hidden)
hidden_all.append(mx.sym.expand_dims(data=state.hidden, axis=1))
# concatenate along time axis
# hidden_concat: (batch_size, target_seq_len, rnn_num_hidden)
hidden_concat = mx.sym.concat(*hidden_all, dim=1, name="%shidden_concat" % self.prefix)
return hidden_concat
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
source_encoded, prev_dynamic_source, source_encoded_length, prev_hidden, *layer_states = states
attention_func = self.attention.on(source_encoded, source_encoded_length, source_encoded_max_length)
prev_state = RecurrentDecoderState(prev_hidden, list(layer_states))
prev_attention_state = rnn_attention.AttentionState(context=None, probs=None,
dynamic_source=prev_dynamic_source)
# state.hidden: (batch_size, rnn_num_hidden)
# attention_state.dynamic_source: (batch_size, source_seq_len, coverage_num_hidden)
# attention_state.probs: (batch_size, source_seq_len)
state, attention_state = self._step(target_embed_prev,
prev_state,
attention_func,
prev_attention_state)
new_states = [source_encoded,
attention_state.dynamic_source,
source_encoded_length,
state.hidden] + state.layer_states
# attention_probs: (batch_size, 1, source_seq_len)
attention_probs = attention_state.probs.reshape((-4, -1, 1, -2))
return state.hidden, attention_probs, None, new_states
def reset(self):
"""
Calls reset on the RNN cell.
"""
self.rnn_pre_attention.reset()
# Shallow copy of cells
cells_to_reset = list(self.rnn_pre_attention._cells)
if self.rnn_post_attention:
self.rnn_post_attention.reset()
cells_to_reset += self.rnn_post_attention._cells
for cell in cells_to_reset:
# TODO remove this once mxnet.rnn.ModifierCell.reset() invokes reset() of base_cell
if isinstance(cell, mx.rnn.ModifierCell):
cell.base_cell.reset()
cell.reset()
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
return self.num_hidden
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
hidden, layer_states = self.get_initial_state(source_encoded, source_encoded_lengths)
context, attention_probs, dynamic_source = self.attention.get_initial_state(source_encoded_lengths,
source_encoded_max_length)
states = [source_encoded, dynamic_source, source_encoded_lengths, hidden] + layer_states
return states
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
        :param target_max_length: Current target sequence length.
:return: List of symbolic variables.
"""
return [mx.sym.Variable(C.SOURCE_ENCODED_NAME),
mx.sym.Variable(C.SOURCE_DYNAMIC_PREVIOUS_NAME),
mx.sym.Variable(C.SOURCE_LENGTH_NAME),
mx.sym.Variable(C.HIDDEN_PREVIOUS_NAME)] + \
[mx.sym.Variable("%senc2decinit_%d" % (self.prefix, i)) for i in
range(len(sum([rnn.state_info for rnn in self.get_rnn_cells()], [])))]
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
return [mx.io.DataDesc(C.SOURCE_ENCODED_NAME,
(batch_size, source_encoded_max_length, source_encoded_depth),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_DYNAMIC_PREVIOUS_NAME,
(batch_size, source_encoded_max_length, self.attention.dynamic_source_num_hidden),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_LENGTH_NAME,
(batch_size,),
layout="N"),
mx.io.DataDesc(C.HIDDEN_PREVIOUS_NAME,
(batch_size, self.num_hidden),
layout="NC")] + \
[mx.io.DataDesc("%senc2decinit_%d" % (self.prefix, i),
(batch_size, num_hidden),
layout=C.BATCH_MAJOR) for i, (_, num_hidden) in enumerate(
sum([rnn.state_shape for rnn in self.get_rnn_cells()], [])
)]
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this decoder.
"""
cells = [self.rnn_pre_attention]
if self.rnn_post_attention:
cells.append(self.rnn_post_attention)
return cells
def get_initial_state(self,
source_encoded: mx.sym.Symbol,
source_encoded_length: mx.sym.Symbol) -> RecurrentDecoderState:
"""
Computes initial states of the decoder, hidden state, and one for each RNN layer.
Optionally, init states for RNN layers are computed using 1 non-linear FC
with the last state of the encoder as input.
:param source_encoded: Concatenated encoder states. Shape: (batch_size, source_seq_len, encoder_num_hidden).
:param source_encoded_length: Lengths of source sequences. Shape: (batch_size,).
:return: Decoder state.
"""
# TODO (tdomhan): Due to a bug in swapaxes we need to avoid in-place gradient additions, see:
# https://github.com/apache/incubator-mxnet/pull/9495
source_encoded = mx.sym.identity(source_encoded)
# The mx.sym.Sequence* operators expect time-major data.
# TODO (tdomhan): Use the `axis` argument instead of transposing once the new MXNet version becomes available.
# (see https://github.com/apache/incubator-mxnet/pull/9306)
# (source_encoded_max_length, batch_size, encoder_depth)
source_encoded_time_major = mx.sym.swapaxes(source_encoded, dim1=0, dim2=1, name='source_encoded_time_major')
# we derive the shape of hidden and layer_states from some input to enable
# shape inference for the batch dimension during inference.
# (batch_size, 1)
zeros = mx.sym.expand_dims(mx.sym.zeros_like(source_encoded_length), axis=1)
# last encoder state: (batch, num_hidden)
source_encoded_last = mx.sym.SequenceLast(data=source_encoded_time_major,
sequence_length=source_encoded_length,
use_sequence_length=True) \
if self.config.state_init == C.RNN_DEC_INIT_LAST else None
source_masked = mx.sym.SequenceMask(data=source_encoded_time_major,
sequence_length=source_encoded_length,
use_sequence_length=True,
value=0.) if self.config.state_init == C.RNN_DEC_INIT_AVG else None
# decoder hidden state
hidden = mx.sym.tile(data=zeros, reps=(1, self.num_hidden))
# initial states for each layer
layer_states = []
for state_idx, (_, init_num_hidden) in enumerate(sum([rnn.state_shape for rnn in self.get_rnn_cells()], [])):
if self.config.state_init == C.RNN_DEC_INIT_ZERO:
init = mx.sym.tile(data=zeros, reps=(1, init_num_hidden))
else:
if self.config.state_init == C.RNN_DEC_INIT_LAST:
init = source_encoded_last
elif self.config.state_init == C.RNN_DEC_INIT_AVG:
# (batch_size, encoder_num_hidden)
init = mx.sym.broadcast_div(mx.sym.sum(source_masked, axis=0, keepdims=False),
mx.sym.expand_dims(source_encoded_length, axis=1))
else:
raise ValueError("Unknown decoder state init type '%s'" % self.config.state_init)
init = mx.sym.FullyConnected(data=init,
num_hidden=init_num_hidden,
weight=self.init_ws[state_idx],
bias=self.init_bs[state_idx],
name="%senc2decinit_%d" % (self.prefix, state_idx))
if self.config.layer_normalization:
init = self.init_norms[state_idx].normalize(init)
init = mx.sym.Activation(data=init, act_type="tanh",
name="%senc2dec_inittanh_%d" % (self.prefix, state_idx))
layer_states.append(init)
return RecurrentDecoderState(hidden, layer_states)
def _step(self, word_vec_prev: mx.sym.Symbol,
state: RecurrentDecoderState,
attention_func: Callable,
attention_state: rnn_attention.AttentionState,
seq_idx: int = 0) -> Tuple[RecurrentDecoderState, rnn_attention.AttentionState]:
"""
Performs single-time step in the RNN, given previous word vector, previous hidden state, attention function,
and RNN layer states.
:param word_vec_prev: Embedding of previous target word. Shape: (batch_size, num_target_embed).
:param state: Decoder state consisting of hidden and layer states.
:param attention_func: Attention function to produce context vector.
:param attention_state: Previous attention state.
:param seq_idx: Decoder time step.
:return: (new decoder state, updated attention state).
"""
# (1) RNN step
# concat previous word embedding and previous hidden state
rnn_input = mx.sym.concat(word_vec_prev, state.hidden, dim=1,
name="%sconcat_target_context_t%d" % (self.prefix, seq_idx))
# rnn_pre_attention_output: (batch_size, rnn_num_hidden)
# next_layer_states: num_layers * [batch_size, rnn_num_hidden]
rnn_pre_attention_output, rnn_pre_attention_layer_states = \
self.rnn_pre_attention(rnn_input, state.layer_states[:self.rnn_pre_attention_n_states])
# (2) Attention step
attention_input = self.attention.make_input(seq_idx, word_vec_prev, rnn_pre_attention_output)
attention_state = attention_func(attention_input, attention_state)
# (3) Attention handling (and possibly context gating)
if self.rnn_post_attention:
upper_rnn_output, upper_rnn_layer_states = \
self.rnn_post_attention(rnn_pre_attention_output, attention_state.context,
state.layer_states[self.rnn_pre_attention_n_states:])
hidden_concat = mx.sym.concat(upper_rnn_output, attention_state.context,
dim=1, name='%shidden_concat_t%d' % (self.prefix, seq_idx))
if self.config.hidden_dropout > 0:
hidden_concat = mx.sym.Dropout(data=hidden_concat, p=self.config.hidden_dropout,
name='%shidden_concat_dropout_t%d' % (self.prefix, seq_idx))
hidden = self._hidden_mlp(hidden_concat, seq_idx)
# TODO: add context gating?
else:
upper_rnn_layer_states = []
hidden_concat = mx.sym.concat(rnn_pre_attention_output, attention_state.context,
dim=1, name='%shidden_concat_t%d' % (self.prefix, seq_idx))
if self.config.hidden_dropout > 0:
hidden_concat = mx.sym.Dropout(data=hidden_concat, p=self.config.hidden_dropout,
name='%shidden_concat_dropout_t%d' % (self.prefix, seq_idx))
if self.config.context_gating:
hidden = self._context_gate(hidden_concat, rnn_pre_attention_output, attention_state, seq_idx)
else:
hidden = self._hidden_mlp(hidden_concat, seq_idx)
return RecurrentDecoderState(hidden, rnn_pre_attention_layer_states + upper_rnn_layer_states), attention_state
def _hidden_mlp(self, hidden_concat: mx.sym.Symbol, seq_idx: int) -> mx.sym.Symbol:
hidden = mx.sym.FullyConnected(data=hidden_concat,
num_hidden=self.num_hidden, # to state size of RNN
weight=self.hidden_w,
bias=self.hidden_b,
name='%shidden_fc_t%d' % (self.prefix, seq_idx))
if self.config.layer_normalization:
hidden = self.hidden_norm.normalize(hidden)
# hidden: (batch_size, rnn_num_hidden)
hidden = mx.sym.Activation(data=hidden, act_type="tanh",
name="%snext_hidden_t%d" % (self.prefix, seq_idx))
return hidden
def _context_gate(self,
hidden_concat: mx.sym.Symbol,
rnn_output: mx.sym.Symbol,
attention_state: rnn_attention.AttentionState,
seq_idx: int) -> mx.sym.Symbol:
gate = mx.sym.FullyConnected(data=hidden_concat,
num_hidden=self.num_hidden,
weight=self.gate_w,
bias=self.gate_b,
name='%shidden_gate_t%d' % (self.prefix, seq_idx))
gate = mx.sym.Activation(data=gate, act_type="sigmoid",
name='%shidden_gate_act_t%d' % (self.prefix, seq_idx))
mapped_rnn_output = mx.sym.FullyConnected(data=rnn_output,
num_hidden=self.num_hidden,
weight=self.mapped_rnn_output_w,
bias=self.mapped_rnn_output_b,
name="%smapped_rnn_output_fc_t%d" % (self.prefix, seq_idx))
mapped_context = mx.sym.FullyConnected(data=attention_state.context,
num_hidden=self.num_hidden,
weight=self.mapped_context_w,
bias=self.mapped_context_b,
name="%smapped_context_fc_t%d" % (self.prefix, seq_idx))
hidden = gate * mapped_rnn_output + (1 - gate) * mapped_context
if self.config.layer_normalization:
hidden = self.hidden_norm.normalize(hidden)
# hidden: (batch_size, rnn_num_hidden)
hidden = mx.sym.Activation(data=hidden, act_type="tanh",
name="%snext_hidden_t%d" % (self.prefix, seq_idx))
return hidden
class ConvolutionalDecoderConfig(Config):
"""
Convolutional decoder configuration.
:param cnn_config: Configuration for the convolution block.
:param max_seq_len_target: Maximum target sequence length.
:param num_embed: Target word embedding size.
:param encoder_num_hidden: Number of hidden units of the encoder.
:param num_layers: The number of convolutional layers.
:param positional_embedding_type: The type of positional embedding.
:param hidden_dropout: Dropout probability on next decoder hidden state.
"""
def __init__(self,
cnn_config: convolution.ConvolutionConfig,
max_seq_len_target: int,
num_embed: int,
encoder_num_hidden: int,
num_layers: int,
positional_embedding_type: str,
project_qkv: bool = False,
hidden_dropout: float = .0) -> None:
super().__init__()
self.cnn_config = cnn_config
self.max_seq_len_target = max_seq_len_target
self.num_embed = num_embed
self.encoder_num_hidden = encoder_num_hidden
self.num_layers = num_layers
self.positional_embedding_type = positional_embedding_type
self.project_qkv = project_qkv
self.hidden_dropout = hidden_dropout
class ConvolutionalDecoder(Decoder):
"""
Convolutional decoder similar to Gehring et al. 2017.
The decoder consists of an embedding layer, positional embeddings, and layers
of convolutional blocks with residual connections.
Notable differences to Gehring et al. 2017:
* Here the context vectors are created from the last encoder state (instead of using the last encoder state as the
key and the sum of the encoder state and the source embedding as the value)
* The encoder gradients are not scaled down by 1/(2 * num_attention_layers).
* Residual connections are not scaled down by math.sqrt(0.5).
* Attention is computed in the hidden dimension instead of the embedding dimension (removes need for training
several projection matrices)
:param config: Configuration for convolutional decoder.
:param prefix: Name prefix for symbols of this decoder.
"""
def __init__(self,
config: ConvolutionalDecoderConfig,
prefix: str = C.DECODER_PREFIX) -> None:
super().__init__()
self.config = config
self.prefix = prefix
# TODO: potentially project the encoder hidden size to the decoder hidden size.
utils.check_condition(config.encoder_num_hidden == config.cnn_config.num_hidden,
"We need to have the same number of hidden units in the decoder "
"as we have in the encoder")
self.pos_embedding = encoder.get_positional_embedding(config.positional_embedding_type,
num_embed=config.num_embed,
max_seq_len=config.max_seq_len_target,
fixed_pos_embed_scale_up_input=False,
fixed_pos_embed_scale_down_positions=True,
prefix=C.TARGET_POSITIONAL_EMBEDDING_PREFIX)
self.layers = [convolution.ConvolutionBlock(
config.cnn_config,
pad_type='left',
prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
if self.config.project_qkv:
self.attention_layers = [layers.ProjectedDotAttention("%s%d_" % (prefix, i),
self.config.cnn_config.num_hidden)
for i in range(config.num_layers)]
else:
self.attention_layers = [layers.PlainDotAttention() for _ in range(config.num_layers)] # type: ignore
self.i2h_weight = mx.sym.Variable('%si2h_weight' % prefix)
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
# (batch_size, target_seq_len, num_hidden)
target_hidden = self._decode(source_encoded=source_encoded,
source_encoded_lengths=source_encoded_lengths,
target_embed=target_embed,
target_embed_lengths=target_embed_lengths,
target_embed_max_length=target_embed_max_length)
return target_hidden
def _decode(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decode the target and produce a sequence of hidden states.
:param source_encoded: Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Shape: (batch_size,).
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Size of embedded target sequence dimension.
:return: The target hidden states. Shape: (batch_size, target_seq_len, num_hidden).
"""
target_embed, target_embed_lengths, target_embed_max_length = self.pos_embedding.encode(target_embed,
target_embed_lengths,
target_embed_max_length)
# target_hidden: (batch_size, target_seq_len, num_hidden)
target_hidden = mx.sym.FullyConnected(data=target_embed,
num_hidden=self.config.cnn_config.num_hidden,
no_bias=True,
flatten=False,
weight=self.i2h_weight)
target_hidden_prev = target_hidden
drop_prob = self.config.hidden_dropout
for layer, att_layer in zip(self.layers, self.attention_layers):
# (batch_size, target_seq_len, num_hidden)
target_hidden = layer(mx.sym.Dropout(target_hidden, p=drop_prob) if drop_prob > 0 else target_hidden,
target_embed_lengths, target_embed_max_length)
# (batch_size, target_seq_len, num_embed)
context = att_layer(target_hidden, source_encoded, source_encoded_lengths)
# residual connection:
target_hidden = target_hidden_prev + target_hidden + context
target_hidden_prev = target_hidden
return target_hidden
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
# Source_encoded: (batch_size, source_encoded_max_length, encoder_depth)
source_encoded, source_encoded_lengths, *layer_states = states
# The last layer doesn't keep any state as we only need the last hidden vector for the next word prediction
# but none of the previous hidden vectors
last_layer_state = None
embed_layer_state = layer_states[0]
cnn_layer_states = list(layer_states[1:]) + [last_layer_state]
kernel_width = self.config.cnn_config.kernel_width
new_layer_states = []
# symbolic indices of the previous word
# (batch_size, num_embed)
indices = mx.sym.arange(start=step - 1, stop=step, step=1, name='indices')
target_embed_prev = self.pos_embedding.encode_positions(indices, target_embed_prev)
# (batch_size, num_hidden)
target_hidden_step = mx.sym.FullyConnected(data=target_embed_prev,
num_hidden=self.config.cnn_config.num_hidden,
no_bias=True,
weight=self.i2h_weight)
        # reshape the step output to add a time dimension
# (batch_size, 1, num_hidden)
target_hidden_step = mx.sym.expand_dims(target_hidden_step, axis=1)
# (batch_size, kernel_width, num_hidden)
target_hidden = mx.sym.concat(embed_layer_state, target_hidden_step, dim=1)
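        # Rolling window: drop the oldest position so the stored state keeps exactly the
        # last (kernel_width - 1) inputs needed by the convolution at the next step.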
new_layer_states.append(mx.sym.slice_axis(data=target_hidden, axis=1, begin=1, end=kernel_width))
target_hidden_step_prev = target_hidden_step
drop_prob = self.config.hidden_dropout
for layer, att_layer, layer_state in zip(self.layers, self.attention_layers, cnn_layer_states):
# (batch_size, kernel_width, num_hidden) -> (batch_size, 1, num_hidden)
target_hidden_step = layer.step(mx.sym.Dropout(target_hidden, p=drop_prob)
if drop_prob > 0 else target_hidden)
# (batch_size, 1, num_embed)
# TODO: compute the source encoded projection only once for efficiency reasons
context_step = att_layer(target_hidden_step, source_encoded, source_encoded_lengths)
# residual connection:
target_hidden_step = target_hidden_step_prev + target_hidden_step + context_step
target_hidden_step_prev = target_hidden_step
if layer_state is not None:
# combine with layer state
# (batch_size, kernel_width, num_hidden)
target_hidden = mx.sym.concat(layer_state, target_hidden_step, dim=1)
new_layer_states.append(mx.sym.slice_axis(data=target_hidden, axis=1, begin=1, end=kernel_width))
else:
# last state, here we only care about the latest hidden state:
# (batch_size, 1, num_hidden) -> (batch_size, num_hidden)
target_hidden = mx.sym.reshape(target_hidden_step, shape=(-3, -1))
# (batch_size, 1, source_encoded_max_length)
attention_probs = mx.sym.reshape(mx.sym.slice_axis(mx.sym.zeros_like(source_encoded),
axis=2, begin=0, end=1),
shape=(0, 1, -1))
return target_hidden, attention_probs, None, [source_encoded, source_encoded_lengths] + new_layer_states
def reset(self):
pass
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
return self.config.cnn_config.num_hidden
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
# Initially all layers get pad symbols as input (zeros)
# (batch_size, kernel_width, num_hidden)
num_hidden = self.config.cnn_config.num_hidden
kernel_width = self.config.cnn_config.kernel_width
# Note: We can not use mx.sym.zeros, as otherwise shape inference fails.
# Therefore we need to get a zero array of the right size through other means.
# (batch_size, 1, 1)
zeros = mx.sym.expand_dims(mx.sym.expand_dims(mx.sym.zeros_like(source_encoded_lengths), axis=1), axis=2)
# (batch_size, kernel_width-1, num_hidden)
next_layer_inputs = [mx.sym.tile(data=zeros, reps=(1, kernel_width - 1, num_hidden),
name="%s%d_init" % (self.prefix, layer_idx))
for layer_idx in range(0, self.config.num_layers)]
return [source_encoded, source_encoded_lengths] + next_layer_inputs
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
        :param target_max_length: Current target sequence length.
:return: List of symbolic variables.
"""
# we keep a fixed slice of the layer inputs as a state for all upper layers:
next_layer_inputs = [mx.sym.Variable("cnn_layer%d_in" % layer_idx)
for layer_idx in range(0, self.config.num_layers)]
return [mx.sym.Variable(C.SOURCE_ENCODED_NAME),
mx.sym.Variable(C.SOURCE_LENGTH_NAME)] + next_layer_inputs
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
num_hidden = self.config.cnn_config.num_hidden
kernel_width = self.config.cnn_config.kernel_width
next_layer_inputs = [mx.io.DataDesc("cnn_layer%d_in" % layer_idx,
shape=(batch_size, kernel_width - 1, num_hidden),
layout="NTW")
for layer_idx in range(0, self.config.num_layers)]
return [mx.io.DataDesc(C.SOURCE_ENCODED_NAME,
(batch_size, source_encoded_max_length, source_encoded_depth),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_LENGTH_NAME, (batch_size,), layout="N")] + next_layer_inputs
def get_max_seq_len(self) -> Optional[int]:
# The positional embeddings potentially pose a limit on the maximum length at inference time.
return self.pos_embedding.get_max_seq_len()
|
py | b40a3cdeb571004f8883946e3043cfb6c13f414f | #!/usr/bin/env python
"""Removes supplier name from existing agreement file names and adds a download filename header.
Usage:
scripts/oneoff/set-agreements-content-disposition.py <stage>
"""
import sys
sys.path.insert(0, '.')
import re
from docopt import docopt
from dateutil.parser import parse as parse_time
from dmutils.s3 import S3
from dmutils.formats import DATETIME_FORMAT
from dmscripts.helpers import logging_helpers
logger = logging_helpers.configure_logger()
def make_copier(src_bucket, target_bucket):
def copy_file_with_content_disposition(src_path, target_path, download_filename):
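        # S3 object metadata cannot be edited in place, so the object is copied onto the
        # target key with a new metadata set that carries the download filename header.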
src_key = src_bucket.bucket.get_key(src_path)
target_bucket.bucket.copy_key(
target_path,
src_bucket_name=src_bucket.bucket_name,
src_key_name=src_path,
preserve_acl=True,
metadata={
"timestamp": parse_time(src_key.last_modified).strftime(DATETIME_FORMAT),
"Content-Disposition": 'attachment; filename="{}"'.format(download_filename),
}
)
return copy_file_with_content_disposition
def path_without_supplier_name(path):
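    # Intended transformation (the supplier name here is made up):
    #   'g-cloud-7/agreements/Acme-Ltd-123456-signed-framework-agreement.pdf'
    #   -> ('g-cloud-7/agreements/123456-signed-framework-agreement.pdf',
    #       'Acme-Ltd-123456-signed-framework-agreement.pdf')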
folder, _, filename = path.rpartition('/')
# Skip archived file versions starting with timestamps
if re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}', filename):
logger.info("Skipping old file version {}".format(path))
return None, None
# Find filenames starting with a supplier name prefix and remove it
match = re.search(r'-(\d{5,6}-.*)', filename)
if not match:
logger.info("{} does not match pattern".format(path))
return None, None
return "/".join([folder, match.group(1)]), filename
def main(stage):
agreements_bucket_name = 'digitalmarketplace-agreements-{0}-{0}'.format(stage)
agreements_bucket = S3(agreements_bucket_name)
copy_file = make_copier(agreements_bucket, agreements_bucket)
agreements_files = agreements_bucket.list('g-cloud-7/agreements/')
for key in agreements_files:
new_path, download_filename = path_without_supplier_name(key['path'])
if not new_path:
continue
if any(k['path'] == new_path for k in agreements_files):
logger.info("Not replacing %s, file already exists", new_path)
continue
logger.info("Copying '%s' to '%s' with filename '%s'", key['path'], new_path, download_filename)
copy_file(key['path'], new_path, download_filename=download_filename)
if __name__ == '__main__':
arguments = docopt(__doc__)
stage = arguments['<stage>']
main(stage)
|
py | b40a3ce59d6a2c8d5f5947002eb4df348dad674e | #!/usr/bin/env python
import sys
fname = sys.argv[1]
def torgb5(v):
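    # Packs a 24-bit 0xRRGGBB value into a 16-bit word with the lowest bit forced to 1,
    # e.g. torgb5(0xFF8040) == 0xFC11.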
r = (v >> 16) & 0xFF
g = (v >> 8) & 0xFF
b = (v >> 0) & 0xFF
low = ((g << 3) & 0xC0) | (b >> 2) | 1
high = (r & 0xF8) | (g >> 5)
return (high << 8) | low
with open(fname, 'r') as f:
words = [word for line in f for word in line.split() ]
data = map(torgb5, map(lambda x: int(x, 16), words))
print(' u16 nesRgb[] =\n { ')
print(', '.join(map(lambda x: '0x%04X' % x, data)))
print('};')
|
py | b40a3e0b80a4f48fcfeeb6234d63df697b91c84f | # coding: utf-8
"""Test parsing of COUNTER BR2 book report."""
from __future__ import absolute_import
import os
import unittest
import warnings
import pycounter.report
class ParseExample(unittest.TestCase):
"""Tests for parsing C3 BR2"""
def setUp(self):
self.report = pycounter.report.parse(
os.path.join(os.path.dirname(__file__),
'data/simpleBR2.csv'))
def test_reportname(self):
self.assertEqual(self.report.report_type, u'BR2')
self.assertEqual(self.report.report_version, 1)
def test_year(self):
self.assertEqual(self.report.year, 2012)
def test_platform(self):
for publication in self.report:
self.assertEqual(publication.publisher, u"Megadodo Publications")
self.assertEqual(publication.platform, u"HHGTTG Online")
def test_stats(self):
publication = self.report.pubs[0]
self.assertEqual(
[x[2] for x in publication],
[0, 25, 0, 0, 0, 0])
class ParseCounter4Example(unittest.TestCase):
"""Tests for parsing C4 BR2"""
def setUp(self):
self.report = pycounter.report.parse(
os.path.join(os.path.dirname(__file__),
'data/C4BR2.tsv'))
def test_reportname(self):
self.assertEqual(self.report.report_type, u'BR2')
self.assertEqual(self.report.report_version, 4)
def test_year(self):
self.assertEqual(self.report.year, 2012)
def test_platform(self):
for publication in self.report:
self.assertEqual(publication.publisher, u"Megadodo Publications")
self.assertEqual(publication.platform, u"HHGTTG Online")
def test_stats(self):
publication = self.report.pubs[0]
self.assertEqual(
[x[2] for x in publication],
[0, 25, 0, 0, 0, 0])
def test_metric(self):
self.assertEqual(self.report.metric, u"Book Section Requests")
class ParseLatin1(unittest.TestCase):
"""Tests for parsing BR2 in latin-1 encoding"""
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.report = pycounter.report.parse(
os.path.join(os.path.dirname(__file__),
'data/simpleBR2_latin_1.csv'))
def test_title(self):
publication = self.report.pubs[1]
self.assertEqual(publication.title, u'Öfake Book')
|
py | b40a3e729cb98df8990a35e9948c12ee39bb21ae | from django.db import models
from django.contrib.auth.models import User, Group
# Create your models here.
class Client(models.Model):
user = models.ForeignKey(User, to_field="username", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=16, null=True)
|
py | b40a3ee3e44bab361274a2c64c8c634990b72f12 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oxford-IIIT pet dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The Oxford-IIIT pet dataset is a 37 category pet image dataset with roughly 200
images for each class. The images have large variations in scale, pose and
lighting. All images have an associated ground truth annotation of breed.
"""
_CITATION = """\
@InProceedings{parkhi12a,
author = "Parkhi, O. M. and Vedaldi, A. and Zisserman, A. and Jawahar, C.~V.",
title = "Cats and Dogs",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
year = "2012",
}
"""
_NUM_SHARDS = 1
_BASE_URL = "http://www.robots.ox.ac.uk/~vgg/data/pets/data"
class OxfordIIITPet(tfds.core.GeneratorBasedBuilder):
"""Oxford-IIIT pet dataset."""
VERSION = tfds.core.Version("1.1.0")
SUPPORTED_VERSIONS = [
tfds.core.Version("2.0.0", experiments={tfds.core.Experiment.S3: True}),
tfds.core.Version("1.1.0"),
]
# Version history:
# 2.0.0: S3 (new shuffling, sharding and slicing mechanism).
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image": tfds.features.Image(),
"label": tfds.features.ClassLabel(num_classes=37),
"file_name": tfds.features.Text(),
}),
supervised_keys=("image", "label"),
urls=["http://www.robots.ox.ac.uk/~vgg/data/pets/"],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns splits."""
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "images.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR),
"annotations": tfds.download.Resource(
url=os.path.join(_BASE_URL, "annotations.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR)
})
images_path_dir = os.path.join(dl_paths["images"], "images")
annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
# Setup train and test splits
train_split = tfds.core.SplitGenerator(
name="train",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"trainval.txt"),
},
)
test_split = tfds.core.SplitGenerator(
name="test",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"test.txt")
},
)
return [train_split, test_split]
def _generate_examples(self, images_dir_path, images_list_file):
with tf.io.gfile.GFile(images_list_file, "r") as images_list:
for line in images_list:
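        # Added clarification: each line of trainval.txt / test.txt is expected
        # to look like "Abyssinian_100 1 1 1" (image id, 1-based class id,
        # species, breed id); only the first two fields are used below.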
image_name, label, _, _ = line.strip().split(" ")
image_name += ".jpg"
label = int(label) - 1
record = {
"image": os.path.join(images_dir_path, image_name),
"label": int(label),
"file_name": image_name
}
if self.version.implements(tfds.core.Experiment.S3):
yield image_name, record
else:
yield record
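# Illustrative usage sketch (added; not part of the original module). Assuming
# the builder above is registered under the name "oxford_iiit_pet", it can be
# loaded through the standard TFDS entry point; the split names match the
# SplitGenerators defined in _split_generators.
if __name__ == "__main__":
  pets, info = tfds.load("oxford_iiit_pet", split="train", with_info=True)
  for example in pets.take(1):
    print(example["file_name"], example["label"])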
|
py | b40a404d875a970ef72c06f65c5e03745e84f742 | #! /usr/bin/env python3
###
# KINOVA (R) KORTEX (TM)
#
# Copyright (c) 2019 Kinova inc. All rights reserved.
#
# This software may be modified and distributed under the
# terms of the BSD 3-Clause license.
#
# Refer to the LICENSE file for details.
#
###
###
# 105-Gen3_i2c_bridge/i2c_bridge.py
#
# Usage example for the I2C Expansion IO on the Gen3 Interconnect module.
#
# PHYSICAL SETUP:
# =========
#
# We used an I2C IO Extender PCA9505 for this example (https://www.nxp.com/docs/en/data-sheet/PCA9505_9506.pdf) and made the connections:
# - SDA -------------- Pin 1
# - SCL -------------- Pin 2
# - DC --------------- Pins 18, 46, 86
# - GND -------------- Pins 6, 11, 23, 27, 28, 29, 34, 39, 51
#
# DESCRIPTION OF CURRENT EXAMPLE:
# ===============================
# In this example, the I2C bridge class encapsulates all necessary Kortex API
# objects and implements the functions to set up the I2C bus, write to the bus and read from the bus.
# Upon a read request, the IO Extender supplies the input values read from the bank 0 pins (IO0_X pins from page 5 of the datasheet)
# The IO Extender can take configuration commands from write requests and the register values are supplied in the data sheet.
#
# For this example, we first read the values of IO0_X pins, then invert the polarity on half of the pins and read them again.
#
# The Init function creates the Kortex API objects and connects to the arm.
#
# The Configure function uses the appropriate RPC to activate the bridge with the desired settings.
#
# The WriteValue function writes a request to the bus.
#
# The ReadValue function initiates a read request to the bus.
#
# The main function :
# - Initializes the I2CBridge object
# - Creates and sends a read request to read from the bank 0 pins on the IO Extender
# - Creates and sends a write request to invert the polarity of half the bank 0 pins on the IO Extender
# - Creates and sends a read request to read from the bank 0 pins on the IO Extender (which will not give the same output as the first request)
###
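# Added clarification (sketch, based on the comments above and the PCA9505
# register map referenced in this example): at the byte level, main() performs
#   read  : 1 byte from slave 0x20     -> current state of the bank 0 inputs (IO0_X)
#   write : [0x10, 0xAA] to slave 0x20 -> 0x10 selects the bank 0 polarity
#                                         inversion register; 0xAA = 0b10101010
#                                         inverts every other input
#   read  : 1 byte from slave 0x20     -> bank 0 again, with alternating bits inverted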
import sys
import os
import time
from kortex_api.autogen.client_stubs.DeviceManagerClientRpc import DeviceManagerClient
from kortex_api.autogen.client_stubs.InterconnectConfigClientRpc import InterconnectConfigClient
from kortex_api.autogen.messages import Common_pb2
from kortex_api.autogen.messages import InterconnectConfig_pb2
from kortex_api.Exceptions import KServerException
class I2CBridge:
def __init__(self, router):
'''
        Implements methods for establishing and operating an I2C bridge
        through the base
'''
self.router = router
# Create device manager client. Device manager is used get a list of devices present in the arm. In this example
# we use device manager to determine the device ID associated with the interconnect.
self.device_manager = DeviceManagerClient(self.router)
# Create interconnect configuration client. This client is used to perform I2C bus configuration and I2C bus actions.
self.interconnect_config = InterconnectConfigClient(self.router)
self.interconnect_device_id = self.GetDeviceIdFromDevType(Common_pb2.INTERCONNECT, 0)
if (self.interconnect_device_id is None):
print ("Could not find the Interconnect in the device list, exiting...")
sys.exit(0)
"""
GetDeviceIdFromDevType(devType, devIndex)
Get device ID according to a given device type (Actuator or interconnect).
Inputs:
        devType : Device type
        devIndex : Index corresponding to the position of the device (i.e. 0 being the first, 1 the second, etc.)
"""
def GetDeviceIdFromDevType(self, device_type, device_index = 0):
devices = self.device_manager.ReadAllDevices()
current_index = 0
for device in devices.device_handle:
if device.device_type == device_type:
if current_index == device_index:
print ("Found the Interconnect on device identifier {}".format(device.device_identifier))
return device.device_identifier
current_index += 1
return None
"""
WriteValue(device_address, data, timeout_ms)
Writes a data array to I2C bus to a given device.
inputs:
device_address: device's I2C address.
data: list containing data to write to device
timeout_ms: write operation timeout in milliseconds
"""
def WriteValue(self, device_address, data, timeout_ms):
i2c_write_parameter = InterconnectConfig_pb2.I2CWriteParameter()
i2c_write_parameter.device = InterconnectConfig_pb2.I2C_DEVICE_EXPANSION
i2c_write_parameter.device_address = device_address
bytesData = bytes(data)
i2c_write_parameter.data.data = bytesData
i2c_write_parameter.data.size = len(bytesData)
i2c_write_parameter.timeout = timeout_ms
return self.interconnect_config.I2CWrite(i2c_write_parameter, deviceId=self.interconnect_device_id)
"""
ReadValue(device_address, bytes_to_read, timeout_ms)
Reads a data array from I2C bus from a given device.
inputs:
device_address: device's I2C address.
bytes_to_read: number of bytes to read from device
timeout_ms: read operation timeout in milliseconds
"""
def ReadValue(self, device_address, bytes_to_read, timeout_ms):
# Create the I2C read request
i2c_read_request = InterconnectConfig_pb2.I2CReadParameter()
i2c_read_request.device = InterconnectConfig_pb2.I2C_DEVICE_EXPANSION
i2c_read_request.device_address = device_address
i2c_read_request.size = bytes_to_read
i2c_read_request.timeout = timeout_ms
# Read the data and print it
read_result = self.interconnect_config.I2CRead(i2c_read_request, deviceId=self.interconnect_device_id)
data = read_result.data
print ("We were supposed to read {} bytes and we read {} bytes.".format(bytes_to_read, read_result.size))
print ("The data is : {0:b}".format(ord(data)))
"""
Configure(is_enabled, mode, addressing)
Configure expansion bus I2C bus on interconnect.
Inputs:
        is_enabled: Enables the I2C bus on interconnect's expansion bus if true, disables it otherwise.
mode: I2C mode in which the bus is set ( InterconnectConfig_pb2.I2C_MODE_STANDARD,
InterconnectConfig_pb2.I2C_MODE_FAST or InterconnectConfig_pb2.I2C_MODE_FAST_PLUS)
addressing: Addressing size used on I2C bus (I2C_DEVICE_ADDRESSING_7_BITS or
I2C_DEVICE_ADDRESSING_10_BITS).
"""
def Configure(self, is_enabled, mode, addressing):
# Create the configuration
I2CConfiguration = InterconnectConfig_pb2.I2CConfiguration()
I2CConfiguration.device = InterconnectConfig_pb2.I2C_DEVICE_EXPANSION
I2CConfiguration.enabled = is_enabled
I2CConfiguration.mode = mode
I2CConfiguration.addressing = addressing
# Set the configuration
self.interconnect_config.SetI2CConfiguration(I2CConfiguration, deviceId=self.interconnect_device_id)
def main():
# Import the utilities helper module
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import utilities
# Parse arguments
parser = argparse.ArgumentParser()
args = utilities.parseConnectionArguments(parser)
# Create connection to the device and get the router
with utilities.DeviceConnection.createTcpConnection(args) as router:
        # Create the gpio bridge object. It implements the kortex methods used to
        # configure and use the interconnect's expansion I2C bus
bridge = I2CBridge(router)
# This has to match the device's slave address (see data sheet)
slave_address = 0x20
# Configure I2C bridge
bridge.Configure(True, InterconnectConfig_pb2.I2C_MODE_FAST, InterconnectConfig_pb2.I2C_DEVICE_ADDRESSING_7_BITS)
time.sleep(1)
print ("I2C bridge object initialized")
# Read the state of the pins in bank 0
print ("Reading byte array from interconnect I2C bus...")
try:
bytes_to_read = 1
bridge.ReadValue(slave_address, bytes_to_read, 100)
time.sleep(0.5)
except Exception as ex:
print ("Error : {}".format(ex))
# Send byte array to inverse polarity on half the pins
print ("Sending byte array to interconnect I2C bus...")
try:
# By looking at the data sheet, we see that to write to the polarity register,
# we have to send command register 0x10 as the first byte, then our data byte
buf = bytes([0x10, 0xAA])
bridge.WriteValue(slave_address, buf, 100)
time.sleep(0.5)
except Exception as ex:
print ("Error : {}".format(ex))
# Read the state of the pins in bank 0
# Half of them should be inverted now
print ("Reading byte array from interconnect I2C bus...")
try:
bytes_to_read = 1
bridge.ReadValue(slave_address, bytes_to_read, 100)
time.sleep(0.5)
except Exception as ex:
print ("Error : {}".format(ex))
if __name__ == "__main__":
main() |
py | b40a406859d80d8eb6178826d538703a51821505 | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from examples.display_data import display_data
from parlai.core.params import ParlaiParser
import sys
import unittest
class TestDisplayData(unittest.TestCase):
"""Basic tests on the display_data.py example."""
args = [
'--task', 'babi:task1k:1',
]
parser = ParlaiParser()
opt = parser.parse_args(args, print_args=False)
opt['num_examples'] = 1
def test_output(self):
"""Does display_data reach the end of the loop?"""
class display_output(object):
def __init__(self):
self.data = []
def write(self, s):
self.data.append(s)
def __str__(self):
return "".join(self.data)
old_out = sys.stdout
output = display_output()
try:
sys.stdout = output
display_data(self.opt)
finally:
# restore sys.stdout
sys.stdout = old_out
str_output = str(output)
self.assertTrue(len(str_output) > 0, "Output is empty")
self.assertTrue("[babi:task1k:1]:" in str_output,
"Babi task did not print")
self.assertTrue("~~" in str_output, "Example output did not complete")
if __name__ == '__main__':
unittest.main()
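# Note (added): the stdout-capturing pattern in test_output can also be written
# with the standard-library helper contextlib.redirect_stdout, for example:
#
#   import contextlib, io
#   buf = io.StringIO()
#   with contextlib.redirect_stdout(buf):
#       display_data(self.opt)
#   str_output = buf.getvalue()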
|
py | b40a41e8e8c9ab9ce720189036f4acfe65ae9c31 | from JumpScale import j
from ClassBase import ClassBase
class Appserver6GreenletTaskletsBase(ClassBase):
def __init__(self,taskletsPath):
self.actor=""
self.method=""
self.description=""
        self.server=None  # link to the app server which will serve this greenlet; not to be filled in by the coder
self.paramvalidation={}#$paramvalidation
self.paramdefault={}
self.paramdescription={}#$paramdescr
self.paramoptional={}
self.taskletsPath=taskletsPath
self.service=""#service object (e.g. the appobject) we want to give to tasklets
self.te=j.core.taskletengine.get(self.taskletsPath)
def wscall(self,ctx,server):
params=self.te.execute(ctx.params,service=self.service,job=None,tags=None)
return params.result
|