ext (stringclasses, 9 values) | sha (stringlengths, min 40, max 40) | content (stringlengths, min 3, max 1.04M) |
---|---|---|
py | 1a3e46d50aa859e0ac2a41a82c5b02569b46c666 | from .api import BitCoreAPI
name = "pybitcore" |
py | 1a3e483330ed6bf5fc7fe411681638ef3d178d14 | # Test script for opening and closing gate
def main(robot):
# Setup
timestep = int(robot.getBasicTimeStep())
# Main loop, perform simulation steps until Webots is stopping the controller
while robot.step(timestep) != -1:
if not(gate_open := robot.gate.open()):
print(gate_open)
|
py | 1a3e48dc6b680f088cb050184c14671f43cf9847 | # Code generated by github.com/lolopinto/ent/ent, DO NOT edit.
"""add auth_codes table
Revision ID: e755edcfdb53
Revises: 21447db32a76
Create Date: 2020-12-07 23:10:15.956128+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e755edcfdb53'
down_revision = '21447db32a76'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('auth_codes',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
sa.Column('code', sa.Text(), nullable=False),
sa.Column('user_id', postgresql.UUID(), nullable=False),
sa.Column('email_address', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(
['user_id'], ['users.id'], name='auth_codes_user_id_fkey', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name='auth_codes_id_pkey')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('auth_codes')
# ### end Alembic commands ###
|
py | 1a3e4968d40f76e800001d13237e1a790e36afb8 | import spidev
import time
import os
# Open SPI bus
spi = spidev.SpiDev()
spi.open(0, 0)
spi.max_speed_hz = 1000000
# Function to read SPI data from MCP3008 chip
# Channel must be an int 0-7
def ReadChannel(channel):
adc = spi.xfer2([1, (8 + channel) << 4, 0])
data = ((adc[1]&3)<<8) + adc[2]
return data
# Function to convert data to voltage level,
# rounded to specified number of decimal places.
def ConvertVolts(data,places):
volts = (data * 3.3) / float(1023)
volts = round(volts,places)
return volts
while True:
# Read the light sensor data
light_level = ReadChannel(1)
light_volts = ConvertVolts(light_level,2)
# Print out results
print("--------------------------------------------")
print("Light: {} ({}V)".format(light_level,light_volts))
# Wait before repeating loop
time.sleep(1) |
py | 1a3e4a7adace93d96fb17d4acf6affa20c4535c6 | import re
import numpy as np
import matplotlib.pyplot as plt
def _plot_primitives_boxplots(log_file):
with open(log_file, 'r') as file:
lines = file.readlines()
# read Primitives
p = [re.findall(r'P=\d+', line) for line in lines]
p = [v[0] for v in p if v]
p = np.array([float(v.split('=')[1]) for v in p])
p = [p[:int(len(p) / 2)], p[int(len(p) / 2):]]
# read metrics
metrics_cls, metrics_att = [], []
for re_str in [r'AP=\d+\.\d+', r'FScore=\d+\.\d+']:
m = [re.findall(re_str, line) for line in lines]
m = [v[0] for v in m if v]
m = np.array([float(v.split('=')[1]) for v in m])
m = [m[:int(len(m) / 2)], m[int(len(m) / 2):]]
metrics_cls.append([m[0][p[0] < 200], m[1][p[1] < 200]])
metrics_att.append([m[0][p[0] >= 200], m[1][p[1] >= 200]])
# plot boxplots
for ylabel, cls, att in zip(['Average Precision (Ap)', 'FScore'], metrics_cls, metrics_att):
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.boxplot(cls, labels=['Train', 'Test'], showmeans=True)
ax1.set_title('Objects', fontsize=16)
ax1.tick_params(axis='x', labelsize=12)
ax1.set_ylabel(ylabel, fontsize=12)
ax2.boxplot(att, labels=['Train', 'Test'], showmeans=True)
ax2.set_title('Attributes', fontsize=16)
ax2.tick_params(axis='x', labelsize=12)
f.tight_layout()
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="Script for computing metrics for Neural Algebra of Classifiers models",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('log_file', type=str, help='Paths to log file.')
args = parser.parse_args()
_plot_primitives_boxplots(args.log_file)
|
py | 1a3e4a98ef75f052e5591ab96cada21a3c9dcd54 | """Facilities for implementing hooks that call shell commands."""
from __future__ import print_function
import logging
import os
from subprocess import Popen, PIPE
from certbot import errors
logger = logging.getLogger(__name__)
def validate_hooks(config):
"""Check hook commands are executable."""
_validate_hook(config.pre_hook, "pre")
_validate_hook(config.post_hook, "post")
_validate_hook(config.renew_hook, "renew")
def _prog(shell_cmd):
"""Extract the program run by a shell command"""
cmd = _which(shell_cmd)
return os.path.basename(cmd) if cmd else None
def _validate_hook(shell_cmd, hook_name):
"""Check that a command provided as a hook is plausibly executable.
:raises .errors.HookCommandNotFound: if the command is not found
"""
if shell_cmd:
cmd = shell_cmd.split(None, 1)[0]
if not _prog(cmd):
path = os.environ["PATH"]
msg = "Unable to find {2}-hook command {0} in the PATH.\n(PATH is {1})".format(
cmd, path, hook_name)
raise errors.HookCommandNotFound(msg)
def pre_hook(config):
"Run pre-hook if it's defined and hasn't been run."
if config.pre_hook and not pre_hook.already:
logger.info("Running pre-hook command: %s", config.pre_hook)
_run_hook(config.pre_hook)
pre_hook.already = True
pre_hook.already = False
def post_hook(config, final=False):
"""Run post hook if defined.
If the verb is renew, we might have more certs to renew, so we wait until
we're called with final=True before actually doing anything.
"""
if config.post_hook:
if not pre_hook.already:
logger.info("No renewals attempted, so not running post-hook")
if config.verb != "renew":
logger.warn("Sanity failure in renewal hooks")
return
if final or config.verb != "renew":
logger.info("Running post-hook command: %s", config.post_hook)
_run_hook(config.post_hook)
def renew_hook(config, domains, lineage_path):
"Run post-renewal hook if defined."
if config.renew_hook:
if not config.dry_run:
os.environ["RENEWED_DOMAINS"] = " ".join(domains)
os.environ["RENEWED_LINEAGE"] = lineage_path
_run_hook(config.renew_hook)
else:
logger.warning("Dry run: skipping renewal hook command: %s", config.renew_hook)
def _run_hook(shell_cmd):
"""Run a hook command.
:returns: stderr if there was any"""
cmd = Popen(shell_cmd, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE)
_out, err = cmd.communicate()
if cmd.returncode != 0:
logger.error('Hook command "%s" returned error code %d', shell_cmd, cmd.returncode)
if err:
logger.error('Error output from %s:\n%s', _prog(shell_cmd), err)
def _is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def _which(program):
"""Test if program is in the path."""
# Borrowed from:
# https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
# XXX May need more porting to handle .exe extensions on Windows
fpath, _fname = os.path.split(program)
if fpath:
if _is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if _is_exe(exe_file):
return exe_file
return None
|
py | 1a3e4b8d2a7894275df17c4736917e7d384dcbc4 | from contextlib import AsyncExitStack
from asyncpg import create_pool
from fastapi import FastAPI
from pytest import fixture
from fastapi_pagination import LimitOffsetPage, Page, add_pagination
from fastapi_pagination.ext.asyncpg import paginate
from ..base import BasePaginationTestCase
from ..utils import faker
@fixture(scope="session")
def database_url(postgres_url) -> str:
return postgres_url
@fixture(scope="session")
def pool(database_url):
return create_pool(database_url)
@fixture(scope="session")
def app(pool, model_cls):
app = FastAPI()
stack = AsyncExitStack()
@app.on_event("startup")
async def on_startup() -> None:
await stack.enter_async_context(pool)
@app.on_event("shutdown")
async def on_shutdown() -> None:
await stack.aclose()
@app.get("/default", response_model=Page[model_cls])
@app.get("/limit-offset", response_model=LimitOffsetPage[model_cls])
async def route():
async with pool.acquire() as conn:
return await paginate(conn, "SELECT id, name FROM users")
return add_pagination(app)
class TestAsyncpg(BasePaginationTestCase):
@fixture(scope="class")
async def entities(self, pool):
async with pool.acquire() as conn:
await conn.executemany(f"INSERT INTO users(name) VALUES ($1);", [(faker.name(),) for _ in range(100)])
return [{**user} for user in await conn.fetch("SELECT id, name FROM users;")]
|
py | 1a3e4c25d898b6afbcb9e025f8b41cf9838c76e3 | # -*- coding=utf-8 -*-
# Implementation of the aNMM model based on bin-sum input of the QA matrix
from __future__ import print_function
from __future__ import absolute_import
import keras
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.activations import softmax
from model import BasicModel
from utils.utility import *
class ANMM(BasicModel):
def __init__(self, config):
super(ANMM, self).__init__(config)
self._name = 'ANMM'
self.check_list = [ 'text1_maxlen', 'bin_num',
'embed', 'embed_size', 'vocab_size',
'num_layers', 'hidden_sizes']
self.setup(config)
self.initializer_fc = keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=11)
self.initializer_gate = keras.initializers.RandomUniform(minval=-0.01, maxval=0.01, seed=11)
if not self.check():
raise TypeError('[ANMM] parameter check wrong')
print('[ANMM] init done', end='\n')
def setup(self, config):
if not isinstance(config, dict):
raise TypeError('parameter config should be dict:', config)
self.set_default('text1_maxlen', 10)
self.set_default('hist_size', 60)
self.set_default('dropout_rate', 0.)
self.config.update(config)
def build(self):
def tensor_product(x):
a = x[0]
b = x[1]
y = K.batch_dot(a, b, axes=1)
y = K.einsum('ijk, ikl->ijl', a, b)
return y
query = Input(name='query', shape=(self.config['text1_maxlen'],))
show_layer_info('Input', query)
doc = Input(name='doc', shape=(self.config['text1_maxlen'], self.config['bin_num']))
show_layer_info('Input', doc)
embedding = Embedding(self.config['vocab_size'], self.config['embed_size'], weights=[self.config['embed']], trainable = False)
q_embed = embedding(query)
show_layer_info('Embedding', q_embed)
q_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False)(q_embed)
show_layer_info('Dense', q_w)
q_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config['text1_maxlen'], ))(q_w)
show_layer_info('Lambda-softmax', q_w)
z = doc
z = Dropout(rate=self.config['dropout_rate'])(z)
show_layer_info('Dropout', z)
for i in range(self.config['num_layers']-1):
z = Dense(self.config['hidden_sizes'][i], kernel_initializer=self.initializer_fc)(z)
z = Activation('tanh')(z)
show_layer_info('Dense', z)
z = Dense(self.config['hidden_sizes'][self.config['num_layers']-1], kernel_initializer=self.initializer_fc)(z)
show_layer_info('Dense', z)
z = Permute((2, 1))(z)
show_layer_info('Permute', z)
z = Reshape((self.config['text1_maxlen'],))(z)
show_layer_info('Reshape', z)
q_w = Reshape((self.config['text1_maxlen'],))(q_w)
show_layer_info('Reshape', q_w)
out_ = Dot( axes= [1, 1])([z, q_w])
if self.config['target_mode'] == 'classification':
out_ = Dense(2, activation='softmax')(out_)
show_layer_info('Dense', out_)
model = Model(inputs=[query, doc], outputs=[out_])
return model
|
py | 1a3e4c2f4361edc87b28f8655593e5802a81140f | from flask import render_template, redirect, url_for, flash, request
from . import auth
from flask_login import login_user, logout_user, login_required
from ..models import User
from .forms import LoginForm, RegistrationForm
from .. import db
from ..email import send_email
# register route
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
if User.query.filter_by(email=form.email.data).first():
flash('Email already exists', 'danger')
return redirect(url_for('auth.register'))
elif User.query.filter_by(username=form.username.data).first():
flash('Username already exists', 'danger')
return redirect(url_for('auth.register'))
elif len(form.password.data) < 8:
flash('Password must be at least 8 characters', 'danger')
return redirect(url_for('auth.register'))
else:
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data,name=form.name.data)
db.session.add(user)
db.session.commit()
flash('Account created successfully. Please Login', 'success')
return redirect(url_for('auth.login'))
title = "Create New Account"
return render_template('auth/register.html', form=form, title=title)
# login route
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.profile', username=user.username))
flash('Invalid username or password', 'danger')
title = "Login to your account"
return render_template('auth/login.html', form=form, title=title)
# logout route
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.', 'success')
return redirect(url_for('main.index'))
|
py | 1a3e4c76f049a1722f127f5379f55b749ab866b2 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Train a video classification model."""
import numpy as np
import pprint
import torch
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.models import build_model
from slowfast.utils.meters import AVAMeter, TrainMeter, ValMeter
from slowfast.utils.multigrid import MultigridSchedule
logger = logging.get_logger(__name__)
def train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer=None
):
"""
Perform the video training for one epoch.
Args:
train_loader (loader): video training loader.
model (model): the video model to train.
optimizer (optim): the optimizer to perform optimization on the model's
parameters.
train_meter (TrainMeter): training meters to log the training performance.
cur_epoch (int): current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
to writer Tensorboard log.
"""
# Enable train mode.
model.train()
train_meter.iter_tic()
data_size = len(train_loader)
for cur_iter, (inputs, labels, _, meta) in enumerate(train_loader):
# Transfer the data to the current GPU device.
if cfg.NUM_GPUS:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
# Update the learning rate.
lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
optim.set_lr(optimizer, lr)
train_meter.data_toc()
if cfg.DETECTION.ENABLE:
preds = model(inputs, meta["boxes"])
else:
preds = model(inputs)
# Explicitly declare reduction to mean.
loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(reduction="mean")
# Compute the loss.
loss = loss_fun(preds, labels)
# check Nan Loss.
misc.check_nan_losses(loss)
# Perform the backward pass.
optimizer.zero_grad()
loss.backward()
# Update the parameters.
optimizer.step()
if cfg.DETECTION.ENABLE:
if cfg.NUM_GPUS > 1:
loss = du.all_reduce([loss])[0]
loss = loss.item()
# Update and log stats.
train_meter.update_stats(None, None, None, loss, lr)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Train/loss": loss, "Train/lr": lr},
global_step=data_size * cur_epoch + cur_iter,
)
else:
top1_err, top5_err = None, None
if cfg.DATA.MULTI_LABEL:
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
[loss] = du.all_reduce([loss])
loss = loss.item()
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point).
loss, top1_err, top5_err = (
loss.item(),
top1_err.item(),
top5_err.item(),
)
# Update and log stats.
train_meter.update_stats(
top1_err,
top5_err,
loss,
lr,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{
"Train/loss": loss,
"Train/lr": lr,
"Train/Top1_err": top1_err,
"Train/Top5_err": top5_err,
},
global_step=data_size * cur_epoch + cur_iter,
)
train_meter.iter_toc() # measure allreduce for this meter
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats.
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
"""
Evaluate the model on the val set.
Args:
val_loader (loader): data loader to provide validation data.
model (model): model to evaluate the performance.
val_meter (ValMeter): meter instance to record and calculate the metrics.
cur_epoch (int): number of the current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
to writer Tensorboard log.
"""
# Evaluation mode enabled. The running stats would not be updated.
model.eval()
val_meter.iter_tic()
for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
if cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
val_meter.data_toc()
if cfg.DETECTION.ENABLE:
# Compute the predictions.
preds = model(inputs, meta["boxes"])
ori_boxes = meta["ori_boxes"]
metadata = meta["metadata"]
if cfg.NUM_GPUS:
preds = preds.cpu()
ori_boxes = ori_boxes.cpu()
metadata = metadata.cpu()
if cfg.NUM_GPUS > 1:
preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(preds, ori_boxes, metadata)
else:
preds = model(inputs)
if cfg.DATA.MULTI_LABEL:
if cfg.NUM_GPUS > 1:
preds, labels = du.all_gather([preds, labels])
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
# Combine the errors across the GPUs.
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point).
top1_err, top5_err = top1_err.item(), top5_err.item()
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(
top1_err,
top5_err,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
global_step=len(val_loader) * cur_epoch + cur_iter,
)
val_meter.update_predictions(preds, labels)
val_meter.log_iter_stats(cur_epoch, cur_iter)
val_meter.iter_tic()
# Log epoch stats.
val_meter.log_epoch_stats(cur_epoch)
# write to tensorboard format if available.
if writer is not None:
if cfg.DETECTION.ENABLE:
writer.add_scalars(
{"Val/mAP": val_meter.full_map}, global_step=cur_epoch
)
else:
all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
all_labels = [
label.clone().detach() for label in val_meter.all_labels
]
if cfg.NUM_GPUS:
all_preds = [pred.cpu() for pred in all_preds]
all_labels = [label.cpu() for label in all_labels]
writer.plot_eval(
preds=all_preds, labels=all_labels, global_step=cur_epoch
)
val_meter.reset()
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
"""
Update the stats in bn layers by calculate the precise stats.
Args:
loader (loader): data loader to provide training data.
model (model): model to update the bn stats.
num_iters (int): number of iterations to compute and update the bn stats.
use_gpu (bool): whether to use GPU or not.
"""
def _gen_loader():
for inputs, *_ in loader:
if use_gpu:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
yield inputs
# Update the bn stats.
update_bn_stats(model, _gen_loader(), num_iters)
def build_trainer(cfg):
"""
Build training model and its associated tools, including optimizer,
dataloaders and meters.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
Returns:
model (nn.Module): training model.
optimizer (Optimizer): optimizer.
train_loader (DataLoader): training data loader.
val_loader (DataLoader): validation data loader.
precise_bn_loader (DataLoader): training data loader for computing
precise BN.
train_meter (TrainMeter): tool for measuring training stats.
val_meter (ValMeter): tool for measuring validation stats.
"""
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = loader.construct_loader(
cfg, "train", is_precise_bn=True
)
# Create meters.
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
return (
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
)
def train(cfg):
"""
Train a video model for many epochs on train set and evaluate it on val set.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Init multigrid.
multigrid = None
if cfg.MULTIGRID.LONG_CYCLE or cfg.MULTIGRID.SHORT_CYCLE:
multigrid = MultigridSchedule()
cfg = multigrid.init_multigrid(cfg)
if cfg.MULTIGRID.LONG_CYCLE:
cfg, _ = multigrid.update_long_cycle(cfg, cur_epoch=0)
# Print config.
logger.info("Train with config:")
logger.info(pprint.pformat(cfg))
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Load a checkpoint to resume training if applicable.
start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = (
loader.construct_loader(cfg, "train", is_precise_bn=True)
if cfg.BN.USE_PRECISE_STATS
else None
)
# Create meters.
if cfg.DETECTION.ENABLE:
train_meter = AVAMeter(len(train_loader), cfg, mode="train")
val_meter = AVAMeter(len(val_loader), cfg, mode="val")
else:
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
# set up writer for logging to Tensorboard format.
if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
cfg.NUM_GPUS * cfg.NUM_SHARDS
):
writer = tb.TensorboardWriter(cfg)
else:
writer = None
# Perform the training loop.
logger.info("Start epoch: {}".format(start_epoch + 1))
for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
if cfg.MULTIGRID.LONG_CYCLE:
cfg, changed = multigrid.update_long_cycle(cfg, cur_epoch)
if changed:
(
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
) = build_trainer(cfg)
# Load checkpoint.
if cu.has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
assert "{:05d}.pyth".format(cur_epoch) in last_checkpoint
else:
last_checkpoint = cfg.TRAIN.CHECKPOINT_FILE_PATH
logger.info("Load from {}".format(last_checkpoint))
cu.load_checkpoint(
last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer
)
# Shuffle the dataset.
loader.shuffle_dataset(train_loader, cur_epoch)
# Train for one epoch.
train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer
)
is_checkp_epoch = cu.is_checkpoint_epoch(
cfg,
cur_epoch,
None if multigrid is None else multigrid.schedule,
)
is_eval_epoch = misc.is_eval_epoch(
cfg, cur_epoch, None if multigrid is None else multigrid.schedule
)
# Compute precise BN stats.
if (
(is_checkp_epoch or is_eval_epoch)
and cfg.BN.USE_PRECISE_STATS
and len(get_bn_modules(model)) > 0
):
calculate_and_update_precise_bn(
precise_bn_loader,
model,
min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
cfg.NUM_GPUS > 0,
)
_ = misc.aggregate_sub_bn_stats(model)
# Save a checkpoint.
if is_checkp_epoch:
cu.save_checkpoint(cfg.OUTPUT_DIR, model, optimizer, cur_epoch, cfg)
# Evaluate the model on validation set.
if is_eval_epoch:
eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer)
if writer is not None:
writer.close()
|
py | 1a3e4dd0d6bcb678eb6a915ea4354129892939c2 | #------------------------------------------------------------------------------
# test_try.py
#------------------------------------------------------------------------------
# BSD 3-Clause License
#
# Copyright (c) 2018, Affirm
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
import re
import unittest
from functools import partial
from pyfnz.tri import *
#------------------------------------------------------------------------------
# test classes
#------------------------------------------------------------------------------
class TryTest(unittest.TestCase):
#--------------------------------------------------------------------------
# tests
#--------------------------------------------------------------------------
def test_init(self):
"""Test initilizing a Try.
"""
failure = lambda: 1 / 0
failure_args = lambda x, y=1 : x / y
success = lambda: 1 + 1
success_args = lambda x, y=1: x + y
self.assertTrue(Try(failure).is_failure)
self.assertTrue(Try(failure, 1, y=0).is_failure)
self.assertTrue(Try(success).is_success)
self.assertTrue(Try(success_args, 1, y=2).is_success)
#--------------------------------------------------------------------------
def test_slots(self):
"""Test slots directive is correctly working.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(failure.is_failure)
with self.assertRaises(AttributeError):
failure.a = 1
self.assertTrue(success.is_success)
with self.assertRaises(AttributeError):
success.a = 1
#--------------------------------------------------------------------------
def test_repr(self):
"""Test string representation.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(re.match(r"Failure\(.+\)", repr(failure)) is not None)
self.assertEqual("Success(2)", repr(success))
#--------------------------------------------------------------------------
def test_do(self):
"""Test do notation.
"""
failure = Try(lambda: 1 / 0)
success1 = Try(lambda: 1 + 1)
success2 = Try(lambda: 2 * 2)
failure_result = Try.do(f * s
for f in failure
for s in success1)
success_result = Try.do(s1 * s2
for s1 in success1
for s2 in success2)
self.assertEqual(0, failure_result | 0)
self.assertEqual(8, success_result | 0)
#--------------------------------------------------------------------------
def test_is_failure(self):
"""Test checking if try is a failure.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(failure.is_failure())
self.assertFalse(success.is_failure())
#--------------------------------------------------------------------------
def test_is_success(self):
"""Test checking if try is a success.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(success.is_success())
self.assertFalse(failure.is_success())
#--------------------------------------------------------------------------
def test_foreach(self):
"""Test running a function with a side-effects on a success.
"""
cache = []
cache_elem = lambda x: cache.append(x)
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure.foreach(cache_elem)
success.foreach(cache_elem)
self.assertEqual(1, len(cache))
self.assertEqual(2, cache[0])
#--------------------------------------------------------------------------
def test_to_either(self):
"""Test converting to an either.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure_either = failure.to_either()
success_either = success.to_either()
self.assertTrue(failure_either.is_left())
self.assertTrue(success_either.is_right())
#--------------------------------------------------------------------------
def test_get_success(self):
"""Test retrieving value contained in a successful Try.
"""
success = Try(lambda: 1 + 1)
failure = Try(lambda: 1 / 0)
success_result = success.get()
self.assertEqual(2, success_result)
with self.assertRaises(ZeroDivisionError):
failure.get()
#--------------------------------------------------------------------------
def test_get_or_else(self):
"""Test retrieving a value from a success else return default for
failure.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.get_or_else(0)
success1_result = success.get_or_else(0)
failure2_result = failure | 0
success2_result = success | 0
self.assertEqual(0, failure1_result)
self.assertEqual(2, success1_result)
self.assertEqual(0, failure2_result)
self.assertEqual(2, success2_result)
#--------------------------------------------------------------------------
def test_or_else(self):
"""Test retrieving self or other either if failure.
"""
default_4 = Try(lambda: 2 + 2)
more_fail = Try(lambda: [][0])
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.or_else(more_fail)
success1_result = success.or_else(more_fail)
failure2_result = failure.or_else(default_4)
success2_result = success.or_else(default_4)
self.assertEqual(0, failure1_result | 0)
self.assertEqual(2, success1_result | 0)
self.assertEqual(4, failure2_result | 0)
self.assertEqual(2, success2_result | 0)
#--------------------------------------------------------------------------
def test_recover(self):
"""Test recovering from a failure.
"""
default_9 = lambda e: 9 if isinstance(e, ZeroDivisionError) else None
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure_result = failure.recover(default_9)
success_result = success.recover(default_9)
self.assertEqual(9, failure_result | 0)
self.assertEqual(2, success_result | 0)
#--------------------------------------------------------------------------
def test_recover_with(self):
"""Test recovering from a failure.
"""
default_9 = lambda e: Try(lambda: 9 if isinstance(e, ZeroDivisionError) else None)
more_fail = lambda e: Try(lambda: [][0])
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.recover_with(default_9)
success1_result = success.recover_with(default_9)
failure2_result = failure.recover_with(more_fail)
success2_result = success.recover_with(more_fail)
self.assertEqual(9, failure1_result | 0)
self.assertEqual(2, success1_result | 0)
self.assertEqual(0, failure2_result | 0)
self.assertEqual(2, success2_result | 0)
#--------------------------------------------------------------------------
def test_map(self):
"""Test running a function on a success.
"""
plus_5 = lambda x: x + 5
fail = lambda x: x[0]
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_plus = failure.map(plus_5)
success1_plus = success.map(plus_5)
failure2_fail = failure.map(fail)
success2_fail = success.map(fail)
self.assertEqual(0, failure1_plus | 0)
self.assertEqual(7, success1_plus | 0)
self.assertEqual(0, failure2_fail | 0)
self.assertEqual(0, success2_fail | 0)
#--------------------------------------------------------------------------
def test_pure(self):
"""Test turning a value into an Either.
"""
success1 = Try.pure(4)
success2 = Try.pure('a')
self.assertEqual(4, success1 | 0)
self.assertEqual('a', success2 | 'b')
#--------------------------------------------------------------------------
def test_flatmap(self):
"""Test binding through a success.
"""
plus_5_maybe = lambda x: Try(lambda: x + 5)
fail_maybe = lambda x: Try(lambda: [][0])
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.flatmap(plus_5_maybe)
success1_result = success.flatmap(plus_5_maybe)
failure2_result = failure.flatmap(fail_maybe)
success2_result = success.flatmap(fail_maybe)
self.assertEqual(0, failure1_result | 0)
self.assertEqual(7, success1_result | 0)
self.assertEqual(0, failure2_result | 0)
self.assertEqual(0, success2_result | 0)
#--------------------------------------------------------------------------
def test_monad_laws(self):
"""Test the monad laws holds for Try.
"""
sub2 = lambda b: Try(lambda: b - 2)
div2 = lambda b: Try(lambda: b / 2)
# left unit | (unit >>= a) == a
self.assertEqual(Try.pure(6).flatmap(sub2), sub2(6))
# right unit | (a >>= unit) == a
self.assertEqual(sub2(6).flatmap(Try.pure), sub2(6))
# associative | ((a >>= b) >>= c) == (a >>= (b >>= c))
self.assertEqual(Try.pure(6).flatmap(lambda b: sub2(b).flatmap(div2)),
Try.pure(6).flatmap(sub2).flatmap(div2))
|
py | 1a3e4f55444e03ffc592b5197e74d2ac6ea94060 | """
Python Calculator App
Github: https://github.com/JigarJoshi04/Calculator
"""
from operations import (
addition,
subtraction,
multiplication,
division,
integer_division,
modulo,
power,
log,
sigmoid,
rand_between,
hcf
)
RUNNING = True
while RUNNING:
print("-" * 50)
num1 = int(input("Enter First Integer :--> "))
num2 = int(input("Enter Second Integer :--> "))
print(
"""
Addition --> 1 Subtraction --> 2
Multiplication --> 3 Division --> 4
Integer Division --> 5 Power --> 6
Modulo --> 7 Log --> 8
Sigmoid of sum --> 9 Random Number --> 10
Highest common factor --> 11 Exit --> 12
"""
)
operator = int(input("Please Enter Your Choice :--> "))
if operator == 1:
result = addition(num1, num2)
elif operator == 2:
result = subtraction(num1, num2)
elif operator == 3:
result = multiplication(num1, num2)
elif operator == 4:
result = division(num1, num2)
elif operator == 5:
result = integer_division(num1, num2)
elif operator == 6:
result = power(num1, num2)
elif operator == 7:
result = modulo(num1, num2)
elif operator == 8:
result = log(num1, num2)
elif operator == 9:
result = sigmoid(num1 + num2)
elif operator == 10:
result = rand_between(num1, num2)
elif operator == 11:
result = hcf(num1, num2)
elif operator == 12:
break
else:
result = "Enter a valid input. Try again!"
print(f"\nThe output of the selected operation is {result}")
choice = input("\nDo you wish to continue? (y/n) :--> ").lower()
RUNNING = True if choice == 'y' else False
|
py | 1a3e4fdd0639bbd9549ba1e663ec7844ee4c880b | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1Rule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_groups=None, api_versions=None, resources=None):
"""
V1alpha1Rule - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_groups': 'list[str]',
'api_versions': 'list[str]',
'resources': 'list[str]'
}
self.attribute_map = {
'api_groups': 'apiGroups',
'api_versions': 'apiVersions',
'resources': 'resources'
}
self._api_groups = api_groups
self._api_versions = api_versions
self._resources = resources
@property
def api_groups(self):
"""
Gets the api_groups of this V1alpha1Rule.
APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.
:return: The api_groups of this V1alpha1Rule.
:rtype: list[str]
"""
return self._api_groups
@api_groups.setter
def api_groups(self, api_groups):
"""
Sets the api_groups of this V1alpha1Rule.
APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.
:param api_groups: The api_groups of this V1alpha1Rule.
:type: list[str]
"""
self._api_groups = api_groups
@property
def api_versions(self):
"""
Gets the api_versions of this V1alpha1Rule.
APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.
:return: The api_versions of this V1alpha1Rule.
:rtype: list[str]
"""
return self._api_versions
@api_versions.setter
def api_versions(self, api_versions):
"""
Sets the api_versions of this V1alpha1Rule.
APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.
:param api_versions: The api_versions of this V1alpha1Rule.
:type: list[str]
"""
self._api_versions = api_versions
@property
def resources(self):
"""
Gets the resources of this V1alpha1Rule.
Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required.
:return: The resources of this V1alpha1Rule.
:rtype: list[str]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this V1alpha1Rule.
Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required.
:param resources: The resources of this V1alpha1Rule.
:type: list[str]
"""
self._resources = resources
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1Rule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | 1a3e505a4c0ca5a26258c60a5c5c71580cd673fb | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Config object."""
from tensorflow_tts.configs import BaseConfig
from tensorflow_tts.processor.ljspeech import LJSPEECH_SYMBOLS as lj_symbols
from tensorflow_tts.processor.kss import KSS_SYMBOLS as kss_symbols
from tensorflow_tts.processor.baker import BAKER_SYMBOLS as bk_symbols
from tensorflow_tts.processor.libritts import LIBRITTS_SYMBOLS as lbri_symbols
from tensorflow_tts.processor.synpaflex import SYNPAFLEX_SYMBOLS as synpaflex_symbols
class Tacotron2Config(BaseConfig):
"""Initialize Tacotron-2 Config."""
def __init__(
self,
dataset="ljspeech",
vocab_size=len(lj_symbols),
embedding_hidden_size=512,
initializer_range=0.02,
layer_norm_eps=1e-6,
embedding_dropout_prob=0.1,
n_speakers=5,
n_conv_encoder=3,
encoder_conv_filters=512,
encoder_conv_kernel_sizes=5,
encoder_conv_activation="mish",
encoder_conv_dropout_rate=0.5,
encoder_lstm_units=256,
reduction_factor=5,
n_prenet_layers=2,
prenet_units=256,
prenet_activation="mish",
prenet_dropout_rate=0.5,
n_lstm_decoder=1,
decoder_lstm_units=1024,
attention_type="lsa",
attention_dim=128,
attention_filters=32,
attention_kernel=31,
n_mels=80,
n_conv_postnet=5,
postnet_conv_filters=512,
postnet_conv_kernel_sizes=5,
postnet_dropout_rate=0.1,
):
"""Init parameters for Tacotron-2 model."""
if dataset == "ljspeech":
self.vocab_size = vocab_size
elif dataset == "kss":
self.vocab_size = len(kss_symbols)
elif dataset == "baker":
self.vocab_size = len(bk_symbols)
elif dataset == "libritts":
self.vocab_size = len(lbri_symbols)
elif dataset == "synpaflex":
self.vocab_size = len(synpaflex_symbols)
else:
raise ValueError("No such dataset: {}".format(dataset))
self.embedding_hidden_size = embedding_hidden_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_dropout_prob = embedding_dropout_prob
self.n_speakers = n_speakers
self.n_conv_encoder = n_conv_encoder
self.encoder_conv_filters = encoder_conv_filters
self.encoder_conv_kernel_sizes = encoder_conv_kernel_sizes
self.encoder_conv_activation = encoder_conv_activation
self.encoder_conv_dropout_rate = encoder_conv_dropout_rate
self.encoder_lstm_units = encoder_lstm_units
# decoder param
self.reduction_factor = reduction_factor
self.n_prenet_layers = n_prenet_layers
self.prenet_units = prenet_units
self.prenet_activation = prenet_activation
self.prenet_dropout_rate = prenet_dropout_rate
self.n_lstm_decoder = n_lstm_decoder
self.decoder_lstm_units = decoder_lstm_units
self.attention_type = attention_type
self.attention_dim = attention_dim
self.attention_filters = attention_filters
self.attention_kernel = attention_kernel
self.n_mels = n_mels
# postnet
self.n_conv_postnet = n_conv_postnet
self.postnet_conv_filters = postnet_conv_filters
self.postnet_conv_kernel_sizes = postnet_conv_kernel_sizes
self.postnet_dropout_rate = postnet_dropout_rate
|
py | 1a3e52011b0e8d3c27aa461fdd1c1612b33d9814 | ## Main
"""
OpenAI gym execution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import importlib
import json
import logging
import os
import time
import sys
from tensorforce import TensorForceError
from tensorforce.agents import Agent
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
# python examples/openai_gym.py Pong-ram-v0 -a examples/configs/vpg.json -n examples/configs/mlp2_network.json -e 50000 -m 2000
# python examples/openai_gym.py CartPole-v0 -a examples/configs/vpg.json -n examples/configs/mlp2_network.json -e 2000 -m 200
def main():
parser = argparse.ArgumentParser()
parser.add_argument('gym_id', help="Id of the Gym environment")
parser.add_argument('-i', '--import-modules', help="Import module(s) required for environment")
parser.add_argument('-a', '--agent', help="Agent configuration file")
parser.add_argument('-n', '--network', default=None, help="Network specification file")
parser.add_argument('-e', '--episodes', type=int, default=None, help="Number of episodes")
parser.add_argument('-t', '--timesteps', type=int, default=None, help="Number of timesteps")
parser.add_argument('-m', '--max-episode-timesteps', type=int, default=None, help="Maximum number of timesteps per episode")
parser.add_argument('-d', '--deterministic', action='store_true', default=False, help="Choose actions deterministically")
parser.add_argument('-s', '--save', help="Save agent to this dir")
parser.add_argument('-se', '--save-episodes', type=int, default=100, help="Save agent every x episodes")
parser.add_argument('-l', '--load', help="Load agent from this dir")
parser.add_argument('--monitor', help="Save results to this directory")
parser.add_argument('--monitor-safe', action='store_true', default=False, help="Do not overwrite previous results")
parser.add_argument('--monitor-video', type=int, default=0, help="Save video every x steps (0 = disabled)")
parser.add_argument('--visualize', action='store_true', default=False, help="Enable OpenAI Gym's visualization")
parser.add_argument('-D', '--debug', action='store_true', default=False, help="Show debug outputs")
parser.add_argument('-te', '--test', action='store_true', default=False, help="Test agent without learning.")
parser.add_argument('-sl', '--sleep', type=float, default=None, help="Slow down simulation by sleeping for x seconds (fractions allowed).")
parser.add_argument('--job', type=str, default=None, help="For distributed mode: The job type of this agent.")
parser.add_argument('--task', type=int, default=0, help="For distributed mode: The task index of this agent.")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if args.import_modules is not None:
for module in args.import_modules.split(','):
importlib.import_module(name=module)
environment = OpenAIGym(
gym_id=args.gym_id,
monitor=args.monitor,
monitor_safe=args.monitor_safe,
monitor_video=args.monitor_video,
visualize=args.visualize
)
if args.agent is not None:
with open(args.agent, 'r') as fp:
agent = json.load(fp=fp)
else:
raise TensorForceError("No agent configuration provided.")
if args.network is not None:
with open(args.network, 'r') as fp:
network = json.load(fp=fp)
agent = Agent.from_spec(
spec=agent,
kwargs=dict(
states=environment.states,
actions=environment.actions,
network=network
)
)
else:
logger.info("No network configuration provided.")
agent = Agent.from_spec(
spec=agent,
kwargs=dict(
states=environment.states,
actions=environment.actions
)
)
if args.load:
load_dir = os.path.dirname(args.load)
if not os.path.isdir(load_dir):
raise OSError("Could not load agent from {}: No such directory.".format(load_dir))
agent.restore_model(args.load)
if args.save:
save_dir = os.path.dirname(args.save)
if not os.path.isdir(save_dir):
try:
os.mkdir(save_dir, 0o755)
except OSError:
raise OSError("Cannot save agent to dir {} ()".format(save_dir))
if args.debug:
logger.info("-" * 16)
logger.info("Configuration:")
logger.info(agent)
runner = Runner(
agent=agent,
environment=environment,
repeat_actions=1
)
if args.debug: # TODO: Timestep-based reporting
report_episodes = 1
else:
report_episodes = 100
logger.info("Starting {agent} for Environment '{env}'".format(agent=agent, env=environment))
def episode_finished(r, id_):
if r.episode % report_episodes == 0:
steps_per_second = r.timestep / (time.time() - r.start_time)
logger.info("Finished episode {:d} after {:d} timesteps. Steps Per Second {:0.2f}".format(
r.agent.episode, r.episode_timestep, steps_per_second
))
logger.info("Episode reward: {}".format(r.episode_rewards[-1]))
logger.info("Average of last 500 rewards: {:0.2f}".
format(sum(r.episode_rewards[-500:]) / min(500, len(r.episode_rewards))))
logger.info("Average of last 100 rewards: {:0.2f}".
format(sum(r.episode_rewards[-100:]) / min(100, len(r.episode_rewards))))
if args.save and args.save_episodes is not None and not r.episode % args.save_episodes:
logger.info("Saving agent to {}".format(args.save))
r.agent.save_model(args.save)
return True
runner.run(
num_timesteps=args.timesteps,
num_episodes=args.episodes,
max_episode_timesteps=args.max_episode_timesteps,
deterministic=args.deterministic,
episode_finished=episode_finished,
testing=args.test,
sleep=args.sleep
)
runner.close()
logger.info("Learning finished. Total episodes: {ep}".format(ep=runner.agent.episode))
if __name__ == '__main__':
main()
|
py | 1a3e52f051cec249342d1d188b76ec339df9d25b | ## You could also solve it this way,
## but a BFS-style traversal that sets a flag once P is visited is faster
import sys
sys.stdin = open('18223.txt')
V,E,P = map(int, sys.stdin.readline().split())
gansun = [[0 for _ in range(V+1)] for _ in range(V+1)]
for _ in range(E):
a,b,c = map(int, sys.stdin.readline().split())
gansun[a][b] = c
gansun[b][a] = c
## 1 -> V로 가는 최단 거리를 구함.
## 1 -> P로 가는 최단 거리를 구함. (다익스트라로 같이 구해짐)
## P -> V로 가는 최단 거리를 구함
oneto = [9999999999 for _ in range(V+1)]
pto = [9999999999 for _ in range(V+1)]
visited = [0 for _ in range(V+1)]
oneto[1] = 0
for _ in range(V):
val = 9999999999
i = 9999999999
for i in range(1,V+1):
if not visited[i] and val > oneto[i]:
val = oneto[i]
nn = i
visited[nn] = 1
if val == 9999999999:
break
for i in range(1,V+1):
if gansun[nn][i] :
if oneto[i] > oneto[nn]+ gansun[nn][i]:
oneto[i] = oneto[nn] + gansun[nn][i]
visited = [0 for _ in range(V+1)]
pto[P] = 0
for _ in range(V):
val = 9999999999
i = 9999999999
for i in range(1,V+1):
if not visited[i] and val > pto[i]:
val = pto[i]
nn = i
visited[nn] = 1
if val == 9999999999:
break
for i in range(1,V+1):
if gansun[nn][i] :
if pto[i] > pto[nn]+ gansun[nn][i]:
pto[i] = pto[nn] + gansun[nn][i]
if oneto[P]+pto[V] == oneto[V]:
print('SAVE HIM')
else:
print('GOOD BYE') |
py | 1a3e52f7c9416d5c06f86f00afe81ede983011db |
from pythonforandroid.toolchain import NDKRecipe, shprint, current_directory, info_main
from os.path import exists, join
import sh
class FontconfigRecipe(NDKRecipe):
version = "really_old"
url = 'https://github.com/vault/fontconfig/archive/androidbuild.zip'
depends = ['sdl2']
dir_name = 'fontconfig'
def build_arch(self, arch):
env = self.get_recipe_env(arch)
with current_directory(self.get_jni_dir()):
shprint(sh.ndk_build, "V=1", 'fontconfig', _env=env)
recipe = FontconfigRecipe()
|
py | 1a3e53695de9f19018e98e1195b10802f116a168 | #!/usr/bin/python3
DATABASE = "./vocabulary.db"
WORD_TYPE_CUTOFFED = 1
WORD_TYPE_CUTOFF = 0
WORD_RIGHT_YES = 1
WORD_RIGHT_NO = 0
# wrong words review frequency, N means wrong words will occur each N times.
WRONG_WORDS_REVIEW_FREQUENCY = 3
# highlight and with font color red.
RECITE_PRINT_FORMAT = "[{}]> \33[1m\33[31m{}\33[0m\33[0m"
# answer with font color green
PRINT_VOCABULARY_DESC = "[Answer]\33[1m\33[32m{}\33[0m\33[0m"
# details in database
DETAILS_PRINT_FORMAT_IN_DATABASE = "[Detail]ID:\33[1m\33[32m{}\33[0m\33[0m\t desc=\33[1m\33[32m{}\33[0m\33[0m\tcutoff=\33[1m\33[32m{}\33[0m\33[0m"
# help information
RECITE_HELP_INFORMATION = """
COMMANDS OF RECITING VOCABULARIES
`\33[1m\33[33myes\33[0m\33[0m` : i have know this vocabulary, just pass it.
`\33[1m\33[33mno\33[0m\33[0m` : i don't know this vocabulary, tell me the meaning.
`\33[1m\33[33msay\33[0m\33[0m` : play the audio by using system setting.
`\33[1m\33[33mcutoff\33[0m\33[0m` : never show this vocabulary again.
`\33[1m\33[33mrepeat\33[0m\33[0m` : repeat current word and stay in this round.
`\33[1m\33[33mshow \33[0m\33[0m` : show details in database.
`\33[1m\33[33mfind= \33[0m\33[0m` : find=xxx, get the meaning in database with key=xxx
`\33[1m\33[33mstatic=N\33[0m\33[0m`: static=N, show the statistics of N days ago. N=0 means current day. N <= 0
`\33[1m\33[33mwrong=N \33[0m\33[0m`: wrong=N, show the wrong words of N days ago. N=0 means current day. N <= 0
`\33[1m\33[33mquit\33[0m\33[0m` : quit this recite round.
`\33[1m\33[33mexport=N\33[0m\33[0m`: export=N, for exporting wrong words of N days ago.
`\33[1m\33[36mhelp\33[0m\33[0m` : tip me with keywords.
example:
>say say repeat
>say show
>find=hello
"""
# sub commands to act for reciting words.
COMMANDS_HELP = "help"
COMMANDS_SAY = "say"
COMMANDS_CUTOFF = "cutoff"
COMMANDS_REPEAT = "repeat"
COMMANDS_SHOW = "show"
COMMANDS_FIND = "find="
COMMANDS_STATIC = "static="
COMMANDS_WRONG = "wrong="
COMMANDS_EXPORT = "export="
COMMANDS_YES = "yes"
COMMANDS_NO = "no"
COMMANDS_QUIT = "quit" |
py | 1a3e5371124e25c0ea7eacfe9feb73555105592d | # Day 10: The Stars Align
class Particle:
def __init__(self, x, y, vx, vy):
self.x = x
self.y = y
self.vx = vx
self.vy = vy
def tick(self):
self.x += self.vx
self.y += self.vy
def __eq__(self, other):
return self.x == other.x and self.y == other.y
POINTS = [
Particle(-10810, 43870, 1, -4),
Particle(-21745, -10795, 2, 1),
Particle(54771, -54515, -5, 5),
Particle(54792, -32660, -5, 3),
Particle(21972, 54799, -2, -5),
Particle(-43565, -43583, 4, 4),
Particle(54775, 43864, -5, -4),
Particle(54819, -54516, -5, 5),
Particle(21981, 32941, -2, -3),
Particle(-32642, 43871, 3, -4),
Particle(-43614, -54516, 4, 5),
Particle(-21758, -43590, 2, 4),
Particle(-43589, -54520, 4, 5),
Particle(-10818, 43864, 1, -4),
Particle(-32646, -43584, 3, 4),
Particle(54814, -54523, -5, 5),
Particle(43877, -43592, -4, 4),
Particle(54766, 32933, -5, -3),
Particle(-21749, 22006, 2, -2),
Particle(-32658, -43588, 3, 4),
Particle(-54544, -54524, 5, 5),
Particle(-43590, -21725, 4, 2),
Particle(-32638, 11076, 3, -1),
Particle(43861, 32941, -4, -3),
Particle(-21699, -32656, 2, 3),
Particle(11089, 11068, -1, -1),
Particle(-10805, 22000, 1, -2),
Particle(-43598, 54797, 4, -5),
Particle(11065, -21728, -1, 2),
Particle(54768, 54799, -5, -5),
Particle(-10810, 43871, 1, -4),
Particle(54803, 22001, -5, -2),
Particle(11081, -32651, -1, 3),
Particle(-10782, -10792, 1, 1),
Particle(-10824, -21722, 1, 2),
Particle(-54529, -21728, 5, 2),
Particle(32918, -10796, -3, 1),
Particle(-10773, 32938, 1, -3),
Particle(-21706, -21723, 2, 2),
Particle(-32664, 43864, 3, -4),
Particle(-43578, 54802, 4, -5),
Particle(11082, 22007, -1, -2),
Particle(43838, -10795, -4, 1),
Particle(54766, 43871, -5, -4),
Particle(22007, -54522, -2, 5),
Particle(-43601, -21726, 4, 2),
Particle(-10825, -10791, 1, 1),
Particle(32950, -43584, -3, 4),
Particle(-32669, 32939, 3, -3),
Particle(-43564, -54524, 4, 5),
Particle(22014, 32934, -2, -3),
Particle(-10797, -43584, 1, 4),
Particle(-10826, -32655, 1, 3),
Particle(-32690, -10791, 3, 1),
Particle(11059, -43588, -1, 4),
Particle(-43598, 43869, 4, -4),
Particle(32931, 43871, -3, -4),
Particle(32927, 32941, -3, -3),
Particle(-10798, 11068, 1, -1),
Particle(32938, -54524, -3, 5),
Particle(-32686, -32659, 3, 3),
Particle(-43601, -54522, 4, 5),
Particle(-32633, 11074, 3, -1),
Particle(54766, 11068, -5, -1),
Particle(11054, -10794, -1, 1),
Particle(-10805, -10791, 1, 1),
Particle(-54533, 43872, 5, -4),
Particle(-43590, 11076, 4, -1),
Particle(32954, -54524, -3, 5),
Particle(43877, -54524, -4, 5),
Particle(21982, -43589, -2, 4),
Particle(-10824, -54518, 1, 5),
Particle(-32666, -54521, 3, 5),
Particle(-10789, -54523, 1, 5),
Particle(32936, -10796, -3, 1),
Particle(43850, 22008, -4, -2),
Particle(43870, -21722, -4, 2),
Particle(11059, -10792, -1, 1),
Particle(43875, -32651, -4, 3),
Particle(54769, -32653, -5, 3),
Particle(-32633, -43586, 3, 4),
Particle(11046, 22007, -1, -2),
Particle(-10794, -54515, 1, 5),
Particle(-43598, -21725, 4, 2),
Particle(11083, -32660, -1, 3),
Particle(-54504, -21728, 5, 2),
Particle(43842, -32651, -4, 3),
Particle(-54533, -10794, 5, 1),
Particle(-43566, 22008, 4, -2),
Particle(54817, 43873, -5, -4),
Particle(-21742, -21725, 2, 2),
Particle(43836, -10793, -4, 1),
Particle(-32634, -21719, 3, 2),
Particle(32918, 54800, -3, -5),
Particle(32923, 22000, -3, -2),
Particle(54774, -32651, -5, 3),
Particle(11091, -32651, -1, 3),
Particle(32902, -32654, -3, 3),
Particle(-21714, -54521, 2, 5),
Particle(-21755, -21726, 2, 2),
Particle(-21707, 22009, 2, -2),
Particle(11099, -54522, -1, 5),
Particle(54766, 11071, -5, -1),
Particle(54803, -54521, -5, 5),
Particle(11086, -54523, -1, 5),
Particle(-32641, 11068, 3, -1),
Particle(-10816, -54519, 1, 5),
Particle(21991, -54516, -2, 5),
Particle(-10810, 22009, 1, -2),
Particle(-21729, 32933, 2, -3),
Particle(-21734, -10793, 2, 1),
Particle(-32674, -10790, 3, 1),
Particle(21970, -21722, -2, 2),
Particle(54802, -10792, -5, 1),
Particle(-43564, 43864, 4, -4),
Particle(43878, -21720, -4, 2),
Particle(32912, 54801, -3, -5),
Particle(-32674, -10788, 3, 1),
Particle(-43611, -32656, 4, 3),
Particle(-43572, 43873, 4, -4),
Particle(-21726, 32932, 2, -3),
Particle(32926, -43590, -3, 4),
Particle(54814, -10789, -5, 1),
Particle(43834, 54796, -4, -5),
Particle(-43578, -32652, 4, 3),
Particle(-43590, -54523, 4, 5),
Particle(-21721, 22009, 2, -2),
Particle(11043, 22000, -1, -2),
Particle(-32678, 54796, 3, -5),
Particle(-54496, 22005, 5, -2),
Particle(-32669, -54516, 3, 5),
Particle(-54525, 32933, 5, -3),
Particle(22023, -32659, -2, 3),
Particle(43866, 43869, -4, -4),
Particle(22031, -54523, -2, 5),
Particle(11067, 22006, -1, -2),
Particle(-43606, 43865, 4, -4),
Particle(-21737, 11075, 2, -1),
Particle(-54510, -43590, 5, 4),
Particle(32918, 54801, -3, -5),
Particle(-10823, -10789, 1, 1),
Particle(22027, 54805, -2, -5),
Particle(-21721, -43589, 2, 4),
Particle(11054, 11069, -1, -1),
Particle(-32656, -32656, 3, 3),
Particle(54766, 11077, -5, -1),
Particle(22029, -32660, -2, 3),
Particle(32942, -10789, -3, 1),
Particle(-43570, -32655, 4, 3),
Particle(32926, 43871, -3, -4),
Particle(-43590, -21722, 4, 2),
Particle(32926, -54522, -3, 5),
Particle(-21714, -43587, 2, 4),
Particle(-21710, -54520, 2, 5),
Particle(32952, 43864, -3, -4),
Particle(-54536, 22004, 5, -2),
Particle(32913, 11068, -3, -1),
Particle(11098, 22009, -1, -2),
Particle(-10817, 11068, 1, -1),
Particle(-54493, 43866, 5, -4),
Particle(43876, -32651, -4, 3),
Particle(-43589, 32932, 4, -3),
Particle(32955, -32652, -3, 3),
Particle(21999, -10787, -2, 1),
Particle(21982, 32941, -2, -3),
Particle(43883, -54524, -4, 5),
Particle(-54533, -32651, 5, 3),
Particle(32953, 43869, -3, -4),
Particle(-21750, 54804, 2, -5),
Particle(-54554, -54520, 5, 5),
Particle(-21723, 22000, 2, -2),
Particle(32918, -10789, -3, 1),
Particle(-10801, -21728, 1, 2),
Particle(-43561, 54805, 4, -5),
Particle(-10802, 22004, 1, -2),
Particle(22002, 32938, -2, -3),
Particle(11097, -32651, -1, 3),
Particle(-21724, 22000, 2, -2),
Particle(-54543, -43592, 5, 4),
Particle(54775, 54796, -5, -5),
Particle(11066, 54796, -1, -5),
Particle(54814, 54798, -5, -5),
Particle(32927, -43592, -3, 4),
Particle(54803, 43872, -5, -4),
Particle(-43569, -54516, 4, 5),
Particle(-54498, 54796, 5, -5),
Particle(54786, 43868, -5, -4),
Particle(43850, 43873, -4, -4),
Particle(-43569, 11074, 4, -1),
Particle(-21714, 22000, 2, -2),
Particle(54809, 32941, -5, -3),
Particle(-43590, -32653, 4, 3),
Particle(-54535, 54800, 5, -5),
Particle(43863, 22007, -4, -2),
Particle(43871, -54516, -4, 5),
Particle(-10810, -10789, 1, 1),
Particle(-43605, 43868, 4, -4),
Particle(32942, 22008, -3, -2),
Particle(-21701, -54524, 2, 5),
Particle(-21722, -21721, 2, 2),
Particle(-32666, -43590, 3, 4),
Particle(32926, -10792, -3, 1),
Particle(22011, -10787, -2, 1),
Particle(43846, 32935, -4, -3),
Particle(22026, -10788, -2, 1),
Particle(-32682, -54515, 3, 5),
Particle(-32666, -10792, 3, 1),
Particle(11071, 22004, -1, -2),
Particle(-10790, -43585, 1, 4),
Particle(32934, -43590, -3, 4),
Particle(-32662, -21723, 3, 2),
Particle(54790, -10790, -5, 1),
Particle(11082, 22002, -1, -2),
Particle(54766, 54801, -5, -5),
Particle(54794, -43584, -5, 4),
Particle(-10789, 54805, 1, -5),
Particle(-43622, -21719, 4, 2),
Particle(-32682, 54796, 3, -5),
Particle(43847, 22009, -4, -2),
Particle(43890, 22007, -4, -2),
Particle(-21742, 43867, 2, -4),
Particle(22023, 43869, -2, -4),
Particle(43858, 43872, -4, -4),
Particle(-21697, -43583, 2, 4),
Particle(43871, 32933, -4, -3),
Particle(21980, 11077, -2, -1),
Particle(43871, 32935, -4, -3),
Particle(-32669, -21727, 3, 2),
Particle(-10774, 11073, 1, -1),
Particle(-43594, -32655, 4, 3),
Particle(-21742, 32940, 2, -3),
Particle(-43595, 11077, 4, -1),
Particle(-54510, 22005, 5, -2),
Particle(32912, 32932, -3, -3),
Particle(-54493, 32932, 5, -3),
Particle(-54497, -10796, 5, 1),
Particle(32923, 43868, -3, -4),
Particle(-10815, -43588, 1, 4),
Particle(32915, -21728, -3, 2),
Particle(32931, 54805, -3, -5),
Particle(32906, 22001, -3, -2),
Particle(21986, -10796, -2, 1),
Particle(-54526, -43592, 5, 4),
Particle(54822, 22000, -5, -2),
Particle(32926, -43585, -3, 4),
Particle(32931, -10791, -3, 1),
Particle(-54496, 43869, 5, -4),
Particle(22005, 11073, -2, -1),
Particle(-54525, 54805, 5, -5),
Particle(-21710, 43867, 2, -4),
Particle(54814, -43586, -5, 4),
Particle(22002, 11074, -2, -1),
Particle(-43606, -10787, 4, 1),
Particle(32913, -54524, -3, 5),
Particle(32928, -43583, -3, 4),
Particle(-10826, 32939, 1, -3),
Particle(-10769, -10787, 1, 1),
Particle(-43574, 43870, 4, -4),
Particle(-32632, -54515, 3, 5),
Particle(-43613, 32941, 4, -3),
Particle(11086, -43590, -1, 4),
Particle(-10773, -54517, 1, 5),
Particle(-54527, -43587, 5, 4),
Particle(22014, -21727, -2, 2),
Particle(32923, 22005, -3, -2),
Particle(-54493, 43865, 5, -4),
Particle(-54541, 43866, 5, -4),
Particle(22019, 32941, -2, -3),
Particle(11067, -54516, -1, 5),
Particle(32912, -21723, -3, 2),
Particle(-10826, 32939, 1, -3),
Particle(-32637, -32659, 3, 3),
Particle(-43574, 11075, 4, -1),
Particle(43834, -54521, -4, 5),
Particle(-43563, -32660, 4, 3),
Particle(-43601, -21725, 4, 2),
Particle(-32647, 54796, 3, -5),
Particle(-21758, -43592, 2, 4),
Particle(54766, 32938, -5, -3),
Particle(32946, -54524, -3, 5),
Particle(-21734, 11074, 2, -1),
Particle(43892, -10787, -4, 1),
Particle(11047, -10790, -1, 1),
Particle(-10794, -43590, 1, 4),
Particle(54793, 11077, -5, -1),
Particle(-32633, 22000, 3, -2),
Particle(-10770, -54516, 1, 5),
Particle(-21749, 54802, 2, -5),
Particle(54782, 11073, -5, -1),
Particle(-43564, -32651, 4, 3),
Particle(-54530, -21721, 5, 2),
Particle(11096, -32655, -1, 3),
Particle(-54525, 54803, 5, -5),
Particle(43863, -21720, -4, 2),
Particle(-32673, -54520, 3, 5),
Particle(-43562, 11071, 4, -1),
Particle(32923, 43870, -3, -4),
Particle(54791, 43873, -5, -4),
Particle(54815, -10787, -5, 1),
Particle(-32634, -32653, 3, 3),
Particle(-54506, -54521, 5, 5),
Particle(-10821, 54796, 1, -5),
Particle(-32653, 32941, 3, -3),
Particle(32950, 43869, -3, -4),
Particle(43895, 54805, -4, -5),
Particle(-54506, 54804, 5, -5),
Particle(-43618, -10788, 4, 1),
Particle(22013, 11077, -2, -1),
Particle(-21746, -32651, 2, 3),
Particle(-43619, -54522, 4, 5),
Particle(22021, 54801, -2, -5),
Particle(43858, 54804, -4, -5),
Particle(43887, 11074, -4, -1),
Particle(-43602, -43588, 4, 4),
Particle(43835, 32936, -4, -3),
Particle(-10823, -54517, 1, 5),
Particle(43834, -43584, -4, 4),
Particle(43883, 11068, -4, -1),
Particle(-43561, -10794, 4, 1),
Particle(11070, -10788, -1, 1),
Particle(11098, 22000, -1, -2),
Particle(-10767, 22000, 1, -2),
Particle(54787, -54517, -5, 5),
Particle(22002, 54804, -2, -5),
Particle(-54543, 22009, 5, -2),
Particle(43869, -10792, -4, 1),
Particle(-10797, -43591, 1, 4),
Particle(54806, 11075, -5, -1),
Particle(43834, -32652, -4, 3),
Particle(54766, 54799, -5, -5),
Particle(54790, -10790, -5, 1),
Particle(32955, 43871, -3, -4)
]
EXAMPLE = [
Particle(9, 1, 0, 2),
Particle(7, 0, -1, 0),
Particle(3, -2, -1, 1),
Particle(6, 10, -2, -1),
Particle(2, -4, 2, 2),
Particle(-6, 10, 2, -2),
Particle(1, 8, 1, -1),
Particle(1, 7, 1, 0),
Particle(-3, 11, 1, -2),
Particle(7, 6, -1, -1),
Particle(-2, 3, 1, 0),
Particle(-4, 3, 2, 0),
Particle(10, -3, -1, 1),
Particle(5, 11, 1, -2),
Particle(4, 7, 0, -1),
Particle(8, -2, 0, 1),
Particle(15, 0, -2, 0),
Particle(1, 6, 1, 0),
Particle(8, 9, 0, -1),
Particle(3, 3, -1, 1),
Particle(0, 5, 0, -1),
Particle(-2, 2, 2, 0),
Particle(5, -2, 1, 2),
Particle(1, 4, 2, 1),
Particle(-2, 7, 2, -2),
Particle(3, 6, -1, -1),
Particle(5, 0, 1, 0),
Particle(-6, 0, 2, 0),
Particle(5, 9, 1, -2),
Particle(14, 7, -2, 0),
Particle(-3, 6, 2, -1)
]
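# The points drift toward each other and spell out a message at the moment their
# bounding box is smallest; draw_particles only plots '#' characters once the box fits
# inside the w x h window, so the readable frames (and their times) stand out in the output.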
def simulate(ps: list, rp: int = 1):
for t in range(rp):
for p in ps:
p.tick()
draw_particles(ps, t)
def draw_particles(ps: list, t: int, w: int = 100, h: int = 10):
image = [['.' for i in range(w)] for j in range(h)]
minX, maxX = min(map(lambda p: p.x, ps)), max(map(lambda p: p.x, ps))
minY, maxY = min(map(lambda p: p.y, ps)), max(map(lambda p: p.y, ps))
if maxX - minX < w and maxY - minY < h:
for p in ps:
image[p.y - minY][p.x - minX] = '#'
print('Range: %d / %d, Time: %d' % (maxX - minX, maxY - minY, t))
print('='*w + '\n' + '\n'.join([''.join(i) for i in image]) + '\n' + '='*w + '\n')
if __name__ == '__main__':
simulate(POINTS, 20000)
|
py | 1a3e53aa8ca72cf9c91801431a76ac507cd154b0 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.rpg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import tensorflow as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import policy_gradient
from open_spiel.python.algorithms.losses import rl_losses
import pyspiel
class PolicyGradientTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
itertools.product(("rpg", "qpg", "rm", "a2c"),
("kuhn_poker", "leduc_poker")))
def test_run_game(self, loss_str, game_name):
env = rl_environment.Environment(game_name)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
sess,
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=loss_str,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in [0, 1]
]
sess.run(tf.global_variables_initializer())
for _ in range(2):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.
}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
sess,
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in range(num_players)
]
sess.run(tf.global_variables_initializer())
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
def test_loss_modes(self):
loss_dict = {
"qpg": rl_losses.BatchQPGLoss,
"rpg": rl_losses.BatchRPGLoss,
"rm": rl_losses.BatchRMLoss,
"a2c": rl_losses.BatchA2CLoss,
}
with self.session() as sess:
for loss_str, loss_class in loss_dict.items():
agent_by_str = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=loss_str,
loss_class=None)
agent_by_class = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=None,
loss_class=loss_class)
self.assertEqual(agent_by_str._pi_loss.shape,
agent_by_class._pi_loss.shape)
self.assertEqual(agent_by_str._pi_loss.dtype,
agent_by_class._pi_loss.dtype)
self.assertEqual(agent_by_str._pi_loss.op.type,
agent_by_class._pi_loss.op.type)
if __name__ == "__main__":
tf.test.main()
|
py | 1a3e542396420699fc7d93bfb3e45abd36276781 | print("HELLO EVERYONE!!!!")
|
py | 1a3e54a4f8134b23bfe2d6d75471663a23358c43 | # 1080
n, m = map(int, input().split())
cnt = 0
normal = [list(map(int, list(input()))) for _ in range(n)]
comp = [list(map(int, list(input()))) for _ in range(n)]
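# Flip (0 <-> 1) every cell of the 3x3 block whose top-left corner is (a, b).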
def change(a, b):
for i in range(a, a+3):
for j in range(b, b+3):
if normal[i][j] == 1:
normal[i][j] = 0
else:
normal[i][j] = 1
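# Greedy scan in row-major order: when the current cell differs from the target, the
# block anchored at (i, j) is the only remaining flip that can still change this cell.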
for i in range(0, n-2):
for j in range(0, m-2):
if normal[i][j] != comp[i][j]:
change(i, j)
cnt += 1
# print(normal, comp)  # leftover debug output; only the count below should be printed
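# After the greedy pass, if any cell still differs the conversion is impossible.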
for i in range(0, n):
for j in range(0, m):
if normal[i][j] != comp[i][j]:
cnt = -1
break
print(cnt)
|
py | 1a3e554f38a90f468c72ae360cb01d2759a62638 | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast, Any, Dict
import streamlit
import json
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as PydeckProto
class PydeckMixin:
def pydeck_chart(self, pydeck_obj=None, use_container_width=False):
"""Draw a chart using the PyDeck library.
This supports 3D maps, point clouds, and more! More info about PyDeck
at https://deckgl.readthedocs.io/en/latest/.
These docs are also quite useful:
- DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
- DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json
When using this command, we advise all users to use a personal Mapbox
token. This ensures the map tiles used in this chart are more
robust. You can do this with the mapbox.token config option.
To get a token for yourself, create an account at
https://mapbox.com. It's free! (for moderate usage levels). For more info
on how to set config options, see
https://docs.streamlit.io/library/advanced-features/configuration#set-configuration-options
Parameters
----------
        pydeck_obj: pydeck.Deck or None
Object specifying the PyDeck chart to draw.
Example
-------
Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
the light map style:
>>> df = pd.DataFrame(
... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
... columns=['lat', 'lon'])
>>>
>>> st.pydeck_chart(pdk.Deck(
... map_style='mapbox://styles/mapbox/light-v9',
... initial_view_state=pdk.ViewState(
... latitude=37.76,
... longitude=-122.4,
... zoom=11,
... pitch=50,
... ),
... layers=[
... pdk.Layer(
... 'HexagonLayer',
... data=df,
... get_position='[lon, lat]',
... radius=200,
... elevation_scale=4,
... elevation_range=[0, 1000],
... pickable=True,
... extruded=True,
... ),
... pdk.Layer(
... 'ScatterplotLayer',
... data=df,
... get_position='[lon, lat]',
... get_color='[200, 30, 0, 160]',
... get_radius=200,
... ),
... ],
... ))
.. output::
https://static.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
height: 530px
"""
pydeck_proto = PydeckProto()
marshall(pydeck_proto, pydeck_obj, use_container_width)
return self.dg._enqueue("deck_gl_json_chart", pydeck_proto)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
# Map used when no data is passed.
EMPTY_MAP: Dict[str, Any] = {
"initialViewState": {"latitude": 0, "longitude": 0, "pitch": 0, "zoom": 1}
}
def marshall(pydeck_proto, pydeck_obj, use_container_width):
if pydeck_obj is None:
spec = json.dumps(EMPTY_MAP)
else:
spec = pydeck_obj.to_json()
pydeck_proto.json = spec
pydeck_proto.use_container_width = use_container_width
if pydeck_obj is not None and isinstance(pydeck_obj.deck_widget.tooltip, dict):
pydeck_proto.tooltip = json.dumps(pydeck_obj.deck_widget.tooltip)
|
py | 1a3e5668839906af49727f1f0b57a709ff7cf289 | #!env python
from importlib import import_module
import sys
import logging
logger = logging.getLogger('root')
import canmatrix
import os
if sys.version_info > (3, 0):
import io
else:
import StringIO
moduleList = ["arxml", "cmcsv", "dbc", "dbf", "cmjson",
"kcd", "fibex", "sym", "xls", "xlsx", "yaml"]
loadedFormats = []
supportedFormats = {}
extensionMapping = {}
for module in moduleList:
try:
import_module("canmatrix." + module)
loadedFormats.append(module)
except ImportError:
logger.info("%s is not supported", module)
for loadedModule in loadedFormats:
supportedFormats[loadedModule] = []
moduleInstance = sys.modules["canmatrix." + loadedModule]
if "load" in dir(moduleInstance):
supportedFormats[loadedModule].append("load")
if "dump" in dir(moduleInstance):
supportedFormats[loadedModule].append("dump")
if "clusterImporter" in dir(moduleInstance):
supportedFormats[loadedModule].append("clusterImporter")
if "clusterExporter" in dir(moduleInstance):
supportedFormats[loadedModule].append("clusterExporter")
if "extension" in dir(moduleInstance):
supportedFormats[loadedModule].append("extension")
extensionMapping[loadedModule] = moduleInstance.extension
else:
extensionMapping[loadedModule] = loadedModule
def loads(string, importType=None, key="", flatImport=None, encoding="utf-8",**options):
if sys.version_info > (3, 0):
if type(string) == str:
string = bytes(string, encoding)
fileObject = io.BytesIO(string)
else:
fileObject = StringIO.StringIO(string)
return load(fileObject, importType, key, flatImport, **options)
def loadp(path, importType=None, key="", flatImport=None, **options):
with open(path, "rb") as fileObject:
if not importType:
for supportedImportType, extension in extensionMapping.items():
if path.endswith(extension) and "load" in supportedFormats[supportedImportType]:
importType = supportedImportType
break
if importType:
return load(fileObject, importType, key, flatImport, **options)
else:
logger.error("This file format is not supported for reading")
return None
return None
def load(fileObject, importType, key="", flatImport=None, **options):
dbs = {}
moduleInstance = sys.modules["canmatrix." + importType]
if "clusterImporter" in supportedFormats[importType]:
dbs = moduleInstance.load(fileObject, **options)
else:
dbs[key] = moduleInstance.load(fileObject, **options)
if flatImport:
for key in dbs:
return dbs[key]
else:
return dbs
def dump(canMatrixOrCluster, fileObject, exportType, **options):
moduleInstance = sys.modules["canmatrix." + exportType]
if (sys.version_info > (3, 0) and type(canmatrix.canmatrix.CanMatrix()) == type(canMatrixOrCluster)) or \
(sys.version_info < (3, 0) and type(canmatrix.CanMatrix()) == type(canMatrixOrCluster)):
moduleInstance.dump(canMatrixOrCluster, fileObject, **options)
elif "clusterExporter" in supportedFormats[exportType]:
moduleInstance.dump(canMatrixOrCluster, fileObject, **options)
def dumpp(canCluster, path, exportType=None, **options):
if not exportType:
for key, extension in extensionMapping.items():
if path.endswith("." + extension) and "dump" in supportedFormats[key]:
exportType = key
break
if exportType:
if "clusterExporter" in supportedFormats[exportType]:
fileObject = open(path, "wb")
dump(canCluster, fileObject, exportType, **options)
else:
for name in canCluster:
if len(name) > 0:
(filepath, ext) = os.path.splitext(path)
outfile = filepath + "_" + name + ext
else:
outfile = path
db = canCluster[name]
fileObject = open(outfile, "wb")
dump(db, fileObject, exportType, **options)
fileObject.close()
else:
logger.error("This file format is not supported for writing")
return None
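# Minimal usage sketch (illustrative only; "example.dbc" / "example.xlsx" are placeholder
# paths, and the formats actually available depend on which optional importers/exporters
# were loaded above):
#
#     dbs = loadp("example.dbc")      # -> dict mapping bus name to CanMatrix
#     dumpp(dbs, "example.xlsx")      # export every bus; format inferred from extension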
|
py | 1a3e56f54c8411727f0a1bc980e876b82a1b6321 | from functools import partial
from urllib.parse import urlunsplit, urlencode
from strava.base import RequestHandler
from strava.constants import APPROVAL_PROMPT, SCOPE, DEFAULT_VERIFY_TOKEN
from strava.helpers import BatchIterator, from_datetime_to_epoch
class StravaApiClientV3(RequestHandler):
api_path = 'api/v3/'
def __init__(self, access_token=None):
self.access_token = access_token
@classmethod
def authorization_url(cls, client_id, redirect_uri, approval_prompt=None, scope=None, state=None, mobile=False):
"""
Returns the Strava authorization URL.
See docs: https://developers.strava.com/docs/authentication/
:param client_id [str]: Strava Client ID.
        :param redirect_uri [str]: URI that the user will be redirected to after authentication.
        :param approval_prompt [str]: indicates if Strava should show the authorization prompt to the user.
        :param scope [Sequence[str]]: list/tuple of the requested scope.
        :param state [str]: A value to be returned in the redirect URI.
        :param mobile [bool]: Indicates if the user should be redirected to the mobile page or not.
"""
oauth_path = 'oauth/authorize/'
mobile_oauth_path = 'oauth/mobile/authorize/'
approval_prompt = approval_prompt or APPROVAL_PROMPT.AUTO
assert approval_prompt in APPROVAL_PROMPT, (
"Invalid value for 'approval_prompt': '{}'".format(approval_prompt),
"Valid values are: {}".format([items for items in APPROVAL_PROMPT.values()])
)
scope = scope or [SCOPE.READ, SCOPE.ACTIVITY_READ_ALL]
invalid_scope = set(scope) - set(SCOPE.values())
assert not invalid_scope, (
"Invalid value for 'scope': {}".format(invalid_scope),
"Valid values are: {}".format(SCOPE.values())
)
qs = {
'client_id': client_id,
'redirect_uri': redirect_uri,
'response_type': 'code',
'approval_prompt': approval_prompt,
'scope': ','.join(scope)
}
if state:
assert isinstance(state, str), "Invalid value for 'state'. This value must be str."
qs['state'] = state
path = mobile_oauth_path if mobile else oauth_path
return urlunsplit(('https', cls.api_domain, path, urlencode(qs), ''))
def subscribe_webhook(self, client_id, client_secret, callback_url, verify_token=DEFAULT_VERIFY_TOKEN):
path = 'push_subscriptions/'
params = {
'client_id': client_id,
'client_secret': client_secret,
'callback_url': callback_url,
'verify_token': verify_token
}
return self._dispatcher('post', path, is_webhook=True, **params)
def validate_webhook_subscription(self, hub_mode, hub_challenge, verify_token=None):
assert hub_mode == 'subscribe', "Invalid 'hub_mode'."
if verify_token:
assert verify_token == DEFAULT_VERIFY_TOKEN, "Invalid 'verify token'."
return {"hub.challenge": hub_challenge}
def check_webhook_subscription(self, client_id, client_secret):
path = 'push_subscriptions/'
params = {'client_id': client_id, 'client_secret': client_secret}
return self._dispatcher('get', path, is_webhook=True, **params)
def delete_webhook_subscription(self, subscription_id, client_id, client_secret):
path = 'push_subscriptions/'
params = {'id': subscription_id, 'client_id': client_id, 'client_secret': client_secret}
return self._dispatcher('delete', path, is_webhook=True, **params)
def exchange_token(self, client_id, client_secret, code):
"""
Exchange the authorization code (received from Strava) for the token.
See docs: https://developers.strava.com/docs/authentication/
:param client_id [str]: Strava Client ID
:param client_secret [str]: Strava Client Secret
:param code [str]: Temporary authorization code received by Strava.
"""
path = 'oauth/token/'
params = {
'client_id': client_id,
'client_secret': client_secret,
'code': code,
'grant_type': 'authorization_code'
}
data = self._dispatcher('post', path, **params)
self.access_token = data['access_token']
return data
def refresh_token(self, client_id, client_secret, refresh_token):
"""
Get the new access token and refresh token from Strava given a refresh token.
See docs: https://developers.strava.com/docs/authentication/
:param client_id [str]: Strava Client ID
:param client_secret [str]: Strava Client Secret
:param refresh_token [str]: Refresh token received by Strava.
"""
path = 'oauth/token/'
params = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
'refresh_token': refresh_token
}
data = self._dispatcher('post', path, **params)
self.access_token = data['access_token']
return data
def deauthorize(self, access_token):
"""
Deauthorize the application.
See docs: https://developers.strava.com/docs/authentication/
"""
path = 'oauth/deauthorize/'
self._dispatcher('post', path, access_token=access_token)
def get_athlete_profile(self):
"""
Return the profile of the authenticated user (access_token owner).
See docs: http://developers.strava.com/docs/reference/#api-Athletes-getLoggedInAthlete
"""
path = 'athlete/'
return self._dispatcher('get', path)
def get_activities(self, before=None, after=None, per_page=50, limit=None):
"""
Get the athele activities
See docs: http://developers.strava.com/docs/reference/#api-Activities-getLoggedInAthleteActivities
:param before [datetime]: datetime to use for filtering activities that have taken place before a certain time
:param after [datetime]: datetime to use for filtering activities that have taken place after a certain time
:param per_page [int]: page size
:param limit [int]: maximum number of activities to fetch
Note: 'before' and 'after' will be considered in UTC.
"""
path = 'athlete/activities/'
params = {}
if before:
params['before'] = from_datetime_to_epoch(before)
if after:
            params['after'] = from_datetime_to_epoch(after)
fetcher = partial(self._dispatcher, 'get', path, **params)
return BatchIterator(fetcher, per_page=per_page, limit=limit)
def get_activity(self, activity_id, include_all_efforts=True):
"""
Get an athlete activity by id
See docs: http://developers.strava.com/docs/reference/#api-Activities-getActivityById
:param activity_id [int]: activity's id
:param include_all_efforts [bool]: include segment efforts in the response
"""
path = f'activities/{activity_id}/'
return self._dispatcher('get', path, include_all_efforts=include_all_efforts)
def explore_segments(self, bounds, activity_type=None, min_cat=None, max_cat=None):
"""
Returns the top 10 segments matching a specified query.
See docs: http://developers.strava.com/docs/reference/#api-Segments-exploreSegments
:param bounds [Sequence[float]]: The latitude and longitude for two points describing a rectangular
        boundary for the search: [southwest corner latitude, southwest corner longitude, northeast corner
latitude, northeast corner longitude]. Bounds should be a sequence of points sequence:
Example: [[lat, long], [lat, long]]
:param activity_type [str]: Desired activity type. Can be 'running' or 'riding'.
:param min_cat [int]: the minimum climbing category.
:param max_cat [int]: the maximum climbing category.
"""
path = 'segments/explore/'
assert len(bounds) == 4, "Invalid bounds. Must be '[southwest_corner_latitude, southwest_corner_longitude, northeast_corner_latitude, northeast_corner_longitude]'"
params = {'bounds': ','.join(str(bound) for bound in bounds)}
if activity_type:
assert activity_type in ('running', 'riding'), "Invalid 'activity_type'. Must be 'running' or 'riding'"
params['activity_type'] = activity_type
if min_cat:
params['min_cat'] = min_cat
if max_cat:
params['max_cat'] = max_cat
return self._dispatcher('get', path, **params)
def get_segment(self, segment_id):
"""
Return the specified segment by id.
See docs: http://developers.strava.com/docs/reference/#api-Segments-getSegmentById
:param segment_id [int]: Segment id.
"""
path = f'segments/{segment_id}/'
return self._dispatcher('get', path)
def get_segment_efforts(self, segment_id, per_page=50, limit=None):
"""
Return all segment's efforts from activities of the authenticated user.
See docs: http://developers.strava.com/docs/reference/#api-SegmentEfforts-getEffortsBySegmentId
:param segment_id [int]: Segment id.
:param per_page [int]: page size.
:param limit [int]: maximum number of activities to fetch.
"""
path = f'segments/{segment_id}/all_efforts/'
fetcher = partial(self._dispatcher, 'get', path)
return BatchIterator(fetcher, per_page=per_page, limit=limit)
def get_segment_effort(self, effort_id):
"""
Returns a segment effort from an activity that is owned by the authenticated athlete.
See docs: http://developers.strava.com/docs/reference/#api-SegmentEfforts-getSegmentEffortById
:param effort_id [id]: segment effort id
"""
path = f'segment_efforts/{effort_id}/'
return self._dispatcher('get', path)
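# Minimal usage sketch (illustrative only; the token is a placeholder and the field names
# in the loop assume Strava's JSON activity schema rather than anything defined here):
#
#     client = StravaApiClientV3(access_token="<ACCESS_TOKEN>")
#     profile = client.get_athlete_profile()
#     for activity in client.get_activities(per_page=30, limit=90):
#         print(activity["id"], activity.get("name"))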
|
py | 1a3e571269d66cd93752593eb7201aeabe516467 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
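# The helpers below copy weights/biases from a numpy reference cell or network (any
# object exposing a `parameters` dict or `cell` attributes) into the matching paddle
# layers, in dygraph or static-graph mode, so both sides hold identical parameters.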
def convert_params_for_cell(np_cell, paddle_cell):
state = np_cell.parameters
for k, v in paddle_cell.named_parameters():
v.set_value(state[k])
def convert_params_for_cell_static(np_cell, paddle_cell, place):
state = np_cell.parameters
for k, v in paddle_cell.named_parameters():
scope = paddle.static.global_scope()
tensor = scope.find_var(v.name).get_tensor()
tensor.set(state[k], place)
def convert_params_for_net(np_net, paddle_net):
for np_layer, paddle_layer in zip(np_net, paddle_net):
if hasattr(np_layer, "cell"):
convert_params_for_cell(np_layer.cell, paddle_layer.cell)
else:
convert_params_for_cell(np_layer.cell_fw, paddle_layer.cell_fw)
convert_params_for_cell(np_layer.cell_bw, paddle_layer.cell_bw)
def convert_params_for_net_static(np_net, paddle_net, place):
for np_layer, paddle_layer in zip(np_net, paddle_net):
if hasattr(np_layer, "cell"):
convert_params_for_cell_static(np_layer.cell, paddle_layer.cell,
place)
else:
convert_params_for_cell_static(np_layer.cell_fw,
paddle_layer.cell_fw, place)
convert_params_for_cell_static(np_layer.cell_bw,
paddle_layer.cell_bw, place)
def get_params_for_cell(np_cell, num_layers, idx):
state = np_cell.parameters
weight_list = [('{}.weight_{}'.format(num_layers, idx), state['weight_ih']),
('{}.weight_{}'.format(num_layers,
idx + 1), state['weight_hh'])]
bias_list = [('{}.bias_{}'.format(num_layers, idx), state['bias_ih']),
('{}.bias_{}'.format(num_layers, idx + 1), state['bias_hh'])]
return weight_list, bias_list
def get_params_for_net(np_net):
weight_list = []
bias_list = []
for layer_idx, np_layer in enumerate(np_net):
if hasattr(np_layer, "cell"):
weight, bias = get_params_for_cell(np_layer.cell, layer_idx, 0)
for w, b in zip(weight, bias):
weight_list.append(w)
bias_list.append(b)
else:
for count, cell in enumerate([np_layer.cell_fw, np_layer.cell_bw]):
weight, bias = get_params_for_cell(cell, layer_idx, count * 2)
for w, b in zip(weight, bias):
weight_list.append(w)
bias_list.append(b)
weight_list.extend(bias_list)
return weight_list
|
py | 1a3e5771919ca9b2c12de76a80e8c2d696265242 | from setuptools import setup, find_packages
setup(
name='NETlist_connector',
version='0.2',
description='Parser for subnets and contacts',
author='CSIRT-MU',
keywords='module contact subnet crusoe',
package_data={'NETlist_connector': ['data/subnets_data.txt']},
packages=find_packages(),
install_requires=['structlog', 'requests', 'netaddr', 'dnspython'],
)
|
py | 1a3e5837854bc31b09ca8ab05b4e6bfd2b231091 | from .mhsa import MultiHeadSelfAttention
from .self_attention import SelfAttention
from .transformer_block import TransformerBlock, TransformerEncoder
|
py | 1a3e58af0a50a68a69e70bae28db7822cefac5a9 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django middleware helper to capture and trace a request."""
import logging
from opencensus.trace.ext import utils
from opencensus.trace.ext.django.config import (settings, convert_to_import)
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
from opencensus.trace import tracer as tracer_module
from opencensus.trace.samplers import probability
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # pragma: NO COVER
MiddlewareMixin = object
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
REQUEST_THREAD_LOCAL_KEY = 'django_request'
SPAN_THREAD_LOCAL_KEY = 'django_span'
BLACKLIST_PATHS = 'BLACKLIST_PATHS'
GCP_EXPORTER_PROJECT = 'GCP_EXPORTER_PROJECT'
SAMPLING_RATE = 'SAMPLING_RATE'
TRANSPORT = 'TRANSPORT'
SERVICE_NAME = 'SERVICE_NAME'
ZIPKIN_EXPORTER_SERVICE_NAME = 'ZIPKIN_EXPORTER_SERVICE_NAME'
ZIPKIN_EXPORTER_HOST_NAME = 'ZIPKIN_EXPORTER_HOST_NAME'
ZIPKIN_EXPORTER_PORT = 'ZIPKIN_EXPORTER_PORT'
ZIPKIN_EXPORTER_PROTOCOL = 'ZIPKIN_EXPORTER_PROTOCOL'
OCAGENT_TRACE_EXPORTER_ENDPOINT = 'OCAGENT_TRACE_EXPORTER_ENDPOINT'
BLACKLIST_HOSTNAMES = 'BLACKLIST_HOSTNAMES'
log = logging.getLogger(__name__)
class _DjangoMetaWrapper(object):
"""
    Wrapper class which takes an HTTP header name and retrieves the value from
Django request.META
"""
def __init__(self, meta=None):
self.meta = meta or _get_django_request().META
def get(self, key):
return self.meta.get('HTTP_' + key.upper().replace('-', '_'))
def _get_django_request():
"""Get Django request from thread local.
:rtype: str
:returns: Django request.
"""
return execution_context.get_opencensus_attr(REQUEST_THREAD_LOCAL_KEY)
def _get_django_span():
"""Get Django span from thread local.
:rtype: str
:returns: Django request.
"""
return execution_context.get_opencensus_attr(SPAN_THREAD_LOCAL_KEY)
def _get_current_tracer():
"""Get the current request tracer."""
return execution_context.get_opencensus_tracer()
def _set_django_attributes(span, request):
"""Set the django related attributes."""
django_user = getattr(request, 'user', None)
if django_user is None:
return
user_id = django_user.pk
user_name = django_user.get_username()
# User id is the django autofield for User model as the primary key
if user_id is not None:
span.add_attribute('django.user.id', str(user_id))
if user_name is not None:
span.add_attribute('django.user.name', str(user_name))
class OpencensusMiddleware(MiddlewareMixin):
"""Saves the request in thread local"""
def __init__(self, get_response=None):
# One-time configuration and initialization.
self.get_response = get_response
self._sampler = settings.SAMPLER
self._exporter = settings.EXPORTER
self._propagator = settings.PROPAGATOR
self._blacklist_paths = settings.params.get(BLACKLIST_PATHS)
# Initialize the sampler
if self._sampler.__name__ == 'ProbabilitySampler':
_rate = settings.params.get(
SAMPLING_RATE, probability.DEFAULT_SAMPLING_RATE)
self.sampler = self._sampler(_rate)
else:
self.sampler = self._sampler()
# Initialize the exporter
transport = convert_to_import(settings.params.get(TRANSPORT))
if self._exporter.__name__ == 'GoogleCloudExporter':
_project_id = settings.params.get(GCP_EXPORTER_PROJECT, None)
self.exporter = self._exporter(
project_id=_project_id,
transport=transport)
elif self._exporter.__name__ == 'ZipkinExporter':
_service_name = self._get_service_name(settings.params)
_zipkin_host_name = settings.params.get(
ZIPKIN_EXPORTER_HOST_NAME, 'localhost')
_zipkin_port = settings.params.get(
ZIPKIN_EXPORTER_PORT, 9411)
_zipkin_protocol = settings.params.get(
ZIPKIN_EXPORTER_PROTOCOL, 'http')
self.exporter = self._exporter(
service_name=_service_name,
host_name=_zipkin_host_name,
port=_zipkin_port,
protocol=_zipkin_protocol,
transport=transport)
elif self._exporter.__name__ == 'TraceExporter':
_service_name = self._get_service_name(settings.params)
_endpoint = settings.params.get(
OCAGENT_TRACE_EXPORTER_ENDPOINT, None)
self.exporter = self._exporter(
service_name=_service_name,
endpoint=_endpoint,
transport=transport)
elif self._exporter.__name__ == 'JaegerExporter':
_service_name = self._get_service_name(settings.params)
self.exporter = self._exporter(
service_name=_service_name,
transport=transport)
else:
self.exporter = self._exporter(transport=transport)
self.blacklist_hostnames = settings.params.get(
BLACKLIST_HOSTNAMES, None)
# Initialize the propagator
self.propagator = self._propagator()
def process_request(self, request):
"""Called on each request, before Django decides which view to execute.
:type request: :class:`~django.http.request.HttpRequest`
:param request: Django http request.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return
# Add the request to thread local
execution_context.set_opencensus_attr(
REQUEST_THREAD_LOCAL_KEY,
request)
execution_context.set_opencensus_attr(
'blacklist_hostnames',
self.blacklist_hostnames)
try:
# Start tracing this request
span_context = self.propagator.from_headers(
_DjangoMetaWrapper(_get_django_request().META))
# Reload the tracer with the new span context
tracer = tracer_module.Tracer(
span_context=span_context,
sampler=self.sampler,
exporter=self.exporter,
propagator=self.propagator)
# Span name is being set at process_view
span = tracer.start_span()
span.span_kind = span_module.SpanKind.SERVER
tracer.add_attribute_to_current_span(
attribute_key=HTTP_METHOD,
attribute_value=request.method)
tracer.add_attribute_to_current_span(
attribute_key=HTTP_URL,
attribute_value=str(request.path))
# Add the span to thread local
# in some cases (exceptions, timeouts) currentspan in
# response event will be one of a child spans.
# let's keep reference to 'django' span and
# use it in response event
execution_context.set_opencensus_attr(
SPAN_THREAD_LOCAL_KEY,
span)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
def process_view(self, request, view_func, *args, **kwargs):
"""Process view is executed before the view function, here we get the
        function name and set it as the span name.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return
try:
# Get the current span and set the span name to the current
# function name of the request.
tracer = _get_current_tracer()
span = tracer.current_span()
span.name = utils.get_func_name(view_func)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
def process_response(self, request, response):
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return response
try:
span = _get_django_span()
span.add_attribute(
attribute_key=HTTP_STATUS_CODE,
attribute_value=str(response.status_code))
_set_django_attributes(span, request)
tracer = _get_current_tracer()
tracer.end_span()
tracer.finish()
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
finally:
return response
def _get_service_name(self, params):
_service_name = params.get(
SERVICE_NAME, None)
if _service_name is None:
_service_name = params.get(
ZIPKIN_EXPORTER_SERVICE_NAME, 'my_service')
return _service_name
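# Illustrative deployment note (an assumption about the host project, not part of this
# module): the middleware is enabled by adding its dotted path to Django's MIDDLEWARE
# setting, e.g.
#
#     MIDDLEWARE = [
#         'opencensus.trace.ext.django.middleware.OpencensusMiddleware',
#         ...
#     ]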
|
py | 1a3e58c64de17d7d0634cc724d3a61639aee2ddb | import os
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from meross_iot.controller.device import HubDevice
from meross_iot.manager import MerossManager
from meross_iot.model.enums import OnlineStatus
from tests import async_get_client
if os.name == 'nt':
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
else:
import asyncio
class TestHub(AioHTTPTestCase):
async def get_application(self):
return web.Application()
async def setUpAsync(self):
# Wait some time before next test-burst
await asyncio.sleep(10)
self.meross_client, self.requires_logout = await async_get_client()
# Look for a device to be used for this test
self.meross_manager = MerossManager(http_client=self.meross_client)
await self.meross_manager.async_init()
await self.meross_manager.async_device_discovery()
self.test_devices = self.meross_manager.find_devices(device_class=HubDevice,
online_status=OnlineStatus.ONLINE)
@unittest_run_loop
async def test_update(self):
if len(self.test_devices) < 1:
self.skipTest("No HUB device has been found to run this test.")
return
dev = self.test_devices[0]
await dev.async_update()
async def tearDownAsync(self):
if self.requires_logout:
await self.meross_client.async_logout()
|
py | 1a3e5970b652fe20f44abfbeaf6c689a5054d526 | import qiskit.quantum_info
from qiskit.quantum_info.synthesis.xx_decompose import XXDecomposer
import numpy as np
from scipy.stats import unitary_group
from monodromy.coverage import *
from monodromy.static.examples import *
from monodromy.haar import expected_cost
import monodromy.render
def default_zx_operation_cost(
strength: Fraction,
# note: Isaac reports this value in percent per degree
scale_factor: float = (64 * 90) / (10000 * 100),
# first component: 2Q invocation cost; second component: local cost
offset: float = 909 / (10000 * 100) + 1 / 1000,
):
"""
A sample fidelity cost model, extracted from experiment, for ZX operations.
"""
return strength * scale_factor + offset
def get_zx_operations(strengths: Dict[Fraction, float]) \
-> List[CircuitPolytope]:
"""
Converts a dictionary mapping fractional CX `strengths` to fidelities to the
corresponding list of `OperationPolytope`s.
"""
operations = []
for strength, fidelity in strengths.items():
operations.append(CircuitPolytope(
operations=[f"rzx(pi/2 * {strength})"],
cost=fidelity,
convex_subpolytopes=exactly(
strength / 4, strength / 4, -strength / 4,
).convex_subpolytopes,
))
return operations
operations = get_zx_operations({
frac: default_zx_operation_cost(frac)
for frac in [Fraction(1), Fraction(1, 2), Fraction(1, 3)]
})
# build the set of covering polytopes
print("==== Working to build a set of covering polytopes ====")
coverage_set = build_coverage_set(operations, chatty=True)
# print it out for user inspection
print("==== Done. Here's what we found: ====")
print_coverage_set(coverage_set)
print("==== Haar volumes ====")
print(f"Haar-expectation cost: {expected_cost(coverage_set, chatty=True)}")
# flex the rendering code
print("==== Render these in Mathematica: =====")
print(monodromy.render.polytopes_to_mathematica(coverage_set))
# perform a gate decomposition
print("==== Compiling a single Haar-random gate into CX, CX/2, CX/3 ====")
# generate a random special unitary
u = unitary_group.rvs(4)
u /= np.linalg.det(u) ** (1 / 4)
# decompose into CX, CX/2, and CX/3
monodromy_decomposer = XXDecomposer(euler_basis="PSX")
circuit = monodromy_decomposer(u, approximate=False)
with np.printoptions(precision=4, suppress=True):
print(u)
print(qiskit.quantum_info.Operator(circuit).data)
print(f"=== {(abs(u - qiskit.quantum_info.Operator(circuit).data) < 1e-1).all()} ===")
print(circuit)
|
py | 1a3e5ab89f650709e6b619da246a7c001e666927 | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import fvote |
py | 1a3e5ae57fdbf88cabe986089c96337873f22a8d | """Emmental version."""
__version__ = "0.0.8+dev"
|
py | 1a3e5cd8fe0d45c79797030ec673d28f43ae39e5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Category(Model):
"""An object describing identified category.
:param name: Name of the category.
:type name: str
:param score: Scoring of the category.
:type score: float
:param detail: Additional category detail if available.
:type detail:
~azure.cognitiveservices.vision.computervision.models.CategoryDetail
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'score': {'key': 'score', 'type': 'float'},
'detail': {'key': 'detail', 'type': 'CategoryDetail'},
}
def __init__(self, name=None, score=None, detail=None):
super(Category, self).__init__()
self.name = name
self.score = score
self.detail = detail
|
py | 1a3e5d1035d18f5a818de7f4b70958550642d84c | # coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.72
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailchimp_marketing.api_client import ApiClient
class BatchWebhooksApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client):
self.api_client = api_client
def remove(self, batch_webhook_id, **kwargs): # noqa: E501
"""Delete batch webhook # noqa: E501
Remove a batch webhook. Webhooks will no longer be sent to the given URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove(batch_webhook_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str batch_webhook_id: The unique id for the batch webhook. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_with_http_info(batch_webhook_id, **kwargs) # noqa: E501
else:
(data) = self.remove_with_http_info(batch_webhook_id, **kwargs) # noqa: E501
return data
def remove_with_http_info(self, batch_webhook_id, **kwargs): # noqa: E501
"""Delete batch webhook # noqa: E501
Remove a batch webhook. Webhooks will no longer be sent to the given URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_with_http_info(batch_webhook_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str batch_webhook_id: The unique id for the batch webhook. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['batch_webhook_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'batch_webhook_id' is set
if ('batch_webhook_id' not in params or
params['batch_webhook_id'] is None):
raise ValueError("Missing the required parameter `batch_webhook_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'batch_webhook_id' in params:
path_params['batch_webhook_id'] = params['batch_webhook_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/batch-webhooks/{batch_webhook_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get(self, batch_webhook_id, **kwargs): # noqa: E501
"""Get batch webhook info # noqa: E501
Get information about a specific batch webhook. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get(batch_webhook_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str batch_webhook_id: The unique id for the batch webhook. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: BatchWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_with_http_info(batch_webhook_id, **kwargs) # noqa: E501
else:
(data) = self.get_with_http_info(batch_webhook_id, **kwargs) # noqa: E501
return data
def get_with_http_info(self, batch_webhook_id, **kwargs): # noqa: E501
"""Get batch webhook info # noqa: E501
Get information about a specific batch webhook. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_with_http_info(batch_webhook_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str batch_webhook_id: The unique id for the batch webhook. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: BatchWebhook
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['batch_webhook_id', 'fields', 'exclude_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'batch_webhook_id' is set
if ('batch_webhook_id' not in params or
params['batch_webhook_id'] is None):
raise ValueError("Missing the required parameter `batch_webhook_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'batch_webhook_id' in params:
path_params['batch_webhook_id'] = params['batch_webhook_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/batch-webhooks/{batch_webhook_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchWebhook', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list(self, **kwargs): # noqa: E501
"""List batch webhooks # noqa: E501
Get all webhooks that have been configured for batches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:return: BatchWebhooks
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_with_http_info(**kwargs) # noqa: E501
return data
def list_with_http_info(self, **kwargs): # noqa: E501
"""List batch webhooks # noqa: E501
Get all webhooks that have been configured for batches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:return: BatchWebhooks
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields', 'exclude_fields', 'count', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
if 'count' in params and params['count'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `count` when calling ``, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/batch-webhooks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchWebhooks', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, batch_webhook_id, body, **kwargs): # noqa: E501
"""Update batch webhook # noqa: E501
Update a webhook that will fire whenever any batch request completes processing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(batch_webhook_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str batch_webhook_id: The unique id for the batch webhook. (required)
:param BatchWebhook2 body: (required)
:return: BatchWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_with_http_info(batch_webhook_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_with_http_info(batch_webhook_id, body, **kwargs) # noqa: E501
return data
def update_with_http_info(self, batch_webhook_id, body, **kwargs): # noqa: E501
"""Update batch webhook # noqa: E501
Update a webhook that will fire whenever any batch request completes processing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_with_http_info(batch_webhook_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str batch_webhook_id: The unique id for the batch webhook. (required)
:param BatchWebhook2 body: (required)
:return: BatchWebhook
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['batch_webhook_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'batch_webhook_id' is set
if ('batch_webhook_id' not in params or
params['batch_webhook_id'] is None):
raise ValueError("Missing the required parameter `batch_webhook_id` when calling ``") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'batch_webhook_id' in params:
path_params['batch_webhook_id'] = params['batch_webhook_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/batch-webhooks/{batch_webhook_id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchWebhook', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create(self, body, **kwargs): # noqa: E501
"""Add batch webhook # noqa: E501
Configure a webhook that will fire whenever any batch request completes processing. You may only have a maximum of 20 batch webhooks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BatchWebhook1 body: (required)
:return: BatchWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_with_http_info(body, **kwargs) # noqa: E501
return data
def create_with_http_info(self, body, **kwargs): # noqa: E501
"""Add batch webhook # noqa: E501
Configure a webhook that will fire whenever any batch request completes processing. You may only have a maximum of 20 batch webhooks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BatchWebhook1 body: (required)
:return: BatchWebhook
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/batch-webhooks', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchWebhook', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
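# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the generated client. The class name
# `BatchWebhooksApi` and the pre-configured `api_client` below are assumptions
# made only to show the sync/async calling convention documented in the
# docstrings above.
#
#   api = BatchWebhooksApi(api_client)
#   webhooks = api.list(count=20, offset=0)      # synchronous call
#   thread = api.list(async_req=True)            # asynchronous call
#   webhooks = thread.get()                      # block until the result is ready
#   api.update(batch_webhook_id, body)           # PATCH an existing webhook
# ---------------------------------------------------------------------------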
|
py | 1a3e5d308b5aa0a24be47e57612c8cba46e3646d | from collections import deque
males = [int(x) for x in input().split()]
females = deque([int(x) for x in input().split()])
matches = 0
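# Summary of the rules encoded in the loop below: males are taken from the end
# of the list (stack-like), females from the front of the deque (queue-like);
# non-positive values are discarded; a value divisible by 25 removes itself and
# the next value on its own side; equal values are a match; otherwise the
# current female leaves and the current male loses 2.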
while males and females:
current_female = females[0]
current_male = males[-1]
if current_female <= 0:
females.popleft()
elif current_male <= 0:
males.pop()
elif current_male == current_female:
females.popleft()
males.pop()
matches += 1
elif current_female % 25 == 0:
females.popleft()
if females:
females.popleft()
elif current_male % 25 == 0:
males.pop()
if males:
males.pop()
else:
females.popleft()
#males[-1] -= 2
males.append(males.pop() - 2)
print(f"Matches: {matches}")
if males:
print(f"Males left: {', '.join(reversed([str(x) for x in males]))}")
else:
print(f"Males left: none")
if females:
print(f"Females left: {', '.join([str(x) for x in females])}")
else:
print(f"Females left: none") |
py | 1a3e5d8bb641830a68c8285591cf3e6f07e499bb | # --------------
# Code starts here
class_1 = [ 'Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
print(class_1)
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
print(class_2)
new_class = class_1 + class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {'Math' : 65, 'English' : 70, 'History' : 80, 'French' : 70,'Science' : 60}
print(courses)
total = sum(courses.values())
print(total)
percentage = (total / 500)*100
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = {'Geoffrey Hinton' : 78, 'Andrew Ng' : 95, 'Sebastian Raschka' : 65, 'Yoshua Benjio' : 50,
'Hilary Mason':70, 'Corinna Cortes' : 66, 'Peter Warden' : 75}
print(mathematics)
topper = max(mathematics , key=mathematics.get)
print(topper)
# Code ends here
# --------------
# Code starts here
# Given string
topper = 'andrew ng'
print('-'*10)
first_name = topper.split()[0]
print(first_name)
last_name = topper.split()[1]
print(last_name)
full_name = last_name + ' ' + first_name
print(full_name)
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
|
py | 1a3e5db5a0e40e426718baf601f33f03f884769a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import time
import re
import datetime
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy import _erfa as erfa
from .utils import day_frac, quantity_day_frac, two_sum, two_product
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
class TimeFormat(metaclass=TimeFormatMeta):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = 'utc' # As of astropy 0.4
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
ok1 = (val1.dtype == np.double and np.all(np.isfinite(val1)) or
val1.size == 0)
ok2 = val2 is None or (val2.dtype == np.double and
not np.any(np.isinf(val2))) or val2.size == 0
if not (ok1 and ok2):
raise TypeError('Input values for {} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
            # seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case if a scale value was
provided that needs to match the class default, otherwise return
the class default.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.mask_if_needed(self.value)
@property
def value(self):
raise NotImplementedError
class TimeJD(TimeFormat):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
@property
def value(self):
return self.jd1 + self.jd2
class TimeMJD(TimeFormat):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
# TODO - this routine and vals should be Cythonized to follow the ERFA
# convention of preserving precision by adding to the larger of the two
# values in a vectorized operation. But in most practical cases the
# first one is probably biggest.
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
return (self.jd1 - erfa.DJM0) + self.jd2
class TimeDecimalYear(TimeFormat):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return decimalyear
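# Illustrative check of the docstring above (comment only, not part of astropy):
#   >>> from astropy.time import Time
#   >>> Time(2000.5, format='decimalyear', scale='utc').iso
#   '2000-07-02 00:00:00.000'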
class TimeFromEpoch(TimeFormat):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,
from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For an TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'"
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err))
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'"
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err))
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
time_from_epoch = ((jd1 - self.epoch.jd1) +
(jd2 - self.epoch.jd2)) / self.unit
return self.mask_if_needed(time_from_epoch)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
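# Illustrative round-trip of the epoch value quoted in the docstring above
# (comment only, not part of astropy):
#   >>> from astropy.time import Time
#   >>> Time('2000-01-01 00:00:00', scale='utc').unix
#   946684800.0
#   >>> Time(946684800.0, format='unix').iso
#   '2000-01-01 00:00:00.000'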
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None} (optional)
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=7*[None] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity` (optional)
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity` (optional)
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : string, `None` (optional)
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
    This is a reference implementation that can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for these classes
if val1.dtype.kind not in ('S', 'U') and val1.size:
raise TypeError('Input values for {} class must be strings'
.format(self.name))
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
        scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
if '{yday:' in str_fmt:
has_yday = True
else:
has_yday = False
yday = None
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs],
flags=['zerosize_ok']):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError(f'No subformats match {pattern}')
return subfmts
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
raise ValueError("Time input terminating in 'Z' must have "
"scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
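# Illustrative conversion between 'yday' and 'iso' strings (comment only, not
# part of astropy):
#   >>> from astropy.time import Time
#   >>> Time('2000:001:00:00:00', format='yday').iso
#   '2000-01-01 00:00:00.000'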
class TimeDatetime64(TimeISOT):
name = 'datetime64'
def _check_val_type(self, val1, val2):
        # Note: don't care about val2 for this class
if not val1.dtype.kind == 'M':
if val1.size > 0:
raise TypeError('Input values for {} class must be '
'datetime64 objects'.format(self.name))
else:
val1 = np.array([], 'datetime64[D]')
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = '2000'
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:
val1 = val1.astype('datetime64[D]')
val1 = val1.astype('S')
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype('datetime64')
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
ISOT but can give signed five-digit year (mostly for negative years);
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present"""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm['scale'] is not None:
warnings.warn("FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError("Input strings for {} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = 'long' + self.out_subfmt
return super().value
class TimeEpochDate(TimeFormat):
"""
Base class for support floating point Besselian and Julian epoch dates
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
return jd_to_epoch(self.jd1, self.jd2)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
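# Illustrative link between the float 'jyear' format and the string
# 'jyear_str' format defined further below (comment only, not part of astropy):
#   >>> from astropy.time import Time
#   >>> Time(2000.0, format='jyear', scale='tt').jyear_str
#   'J2000.000'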
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double],
flags=['zerosize_ok'])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError('Time {} does not match {} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
class TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_DELTA_SCALES))
return scale
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
@property
def value(self):
return (self.jd1 + self.jd2) / self.unit
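# Illustrative use of the two delta formats defined below, which differ only in
# the unit of the stored value (comment only, not part of astropy):
#   >>> from astropy.time import TimeDelta
#   >>> TimeDelta(1.0, format='jd').sec
#   86400.0
#   >>> TimeDelta(86400.0, format='sec').jd
#   1.0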
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime.timedelta objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + [np.double])
for val, sec in iterator:
sec[...] = val.item().total_seconds()
self.jd1, self.jd2 = day_frac(iterator.operands[-1], 0.0,
divisor=erfa.DAYSEC)
@property
def value(self):
iterator = np.nditer([self.jd1 + self.jd2, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + [object])
for jd, out in iterator:
out[...] = datetime.timedelta(days=jd.item())
return self.mask_if_needed(iterator.operands[-1])
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
|
py | 1a3e5df56d8dbbbaa088b59864eec89ab874bd65 | from pymagnitude import *
import configparser
from loguru import logger
class Embedding(object):
def __init__(self, config_path="config.cfg"):
self.config_path = config_path
config = configparser.ConfigParser()
config.read(self.config_path)
self.EMBEDING_FILE = config.get("DATA", "EMBEDING_FILE")
self.EMBEDDING_DIM = config.getint("DATA", "EMBEDING_DIM")
try:
self._load_embedding()
        except Exception:
raise ValueError(f"Model File Not found: {self.EMBEDING_FILE}")
def __str__(self):
return f"config_path:{self.config_path}, embedding_file_path:{self.EMBEDING_FILE}, embedding_dimension:{self.EMBEDDING_DIM}, length:{self.WEM.length}"
def __repr__(self):
return self.__str__()
def _load_embedding(self):
logger.info(f"Loading Embeddings from: {self.EMBEDING_FILE}")
self.WEM = Magnitude(self.EMBEDING_FILE)
def get_embedding(self):
return self.WEM
def get_embedding_dim(self):
return self.EMBEDDING_DIM
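# Illustrative usage sketch, not part of the class above. It assumes a
# `config.cfg` whose [DATA] section defines EMBEDING_FILE (a .magnitude vector
# file) and EMBEDING_DIM, matching what __init__ reads.
#
#   emb = Embedding("config.cfg")
#   vectors = emb.get_embedding()        # pymagnitude Magnitude object
#   vec = vectors.query("hello")         # vector of length emb.get_embedding_dim()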
|
py | 1a3e5ee2ff9ceb2bb2d5b457df6b58c642b42398 | from heaty.__main__ import main
if __name__ == '__main__':
main()
|
py | 1a3e608751dc3c15c9cd43cc2047c3199334eb63 | # Generated by Django 2.2 on 2019-04-20 15:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('fleamarket', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='abstractad',
name='seller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
py | 1a3e609aab6e242f32c690a5c9a9a6a2516d92d1 | from __future__ import print_function
import collections
import logging
from itertools import chain, product
import math
import random
_logger = logging.getLogger(__name__)
EvaluationConfig = collections.namedtuple('EvaluationConfig',
['num_samples', 'sample_size'])
FORMAT_STRINGS = {
'default': """Filename : {name}
Num samples: {samplesize_count}
Sample size: {samplesize_avg}
F-score : {fscore_avg:.3}
Precision : {precision_avg:.3}
Recall : {recall_avg:.3}""",
'table': "{name:10} {precision_avg:6.3} {recall_avg:6.3} {fscore_avg:6.3}",
'latex': "{name} & {precision_avg:.3} &"
" {recall_avg:.3} & {fscore_avg:.3} \\\\"}
def _sample(compound_list, size, seed):
"""Create a specific size sample from the compound list using a specific
seed"""
return random.Random(seed).sample(compound_list, size)
class MorfessorEvaluationResult(object):
"""A MorfessorEvaluationResult is returned by a MorfessorEvaluation
object. It's purpose is to store the evaluation data and provide nice
formatting options.
Each MorfessorEvaluationResult contains the data of 1 evaluation
(which can have multiple samples).
"""
print_functions = {'avg': lambda x: sum(x) / len(x),
'min': min,
'max': max,
'values': list,
'count': len}
    # TODO: maybe add std as a print function?
def __init__(self, meta_data=None):
self.meta_data = meta_data
self.precision = []
self.recall = []
self.fscore = []
self.samplesize = []
self._cache = None
def __getitem__(self, item):
"""Provide dict style interface for all values (standard values and
metadata)"""
if self._cache is None:
self._fill_cache()
return self._cache[item]
def add_data_point(self, precision, recall, f_score, sample_size):
"""Method used by MorfessorEvaluation to add the results of a single
sample to the object"""
self.precision.append(precision)
self.recall.append(recall)
self.fscore.append(f_score)
self.samplesize.append(sample_size)
#clear cache
self._cache = None
def __str__(self):
"""Method for default visualization"""
return self.format(FORMAT_STRINGS['default'])
def _fill_cache(self):
""" Pre calculate all variable / function combinations and put them in
cache"""
self._cache = {'{}_{}'.format(val, func_name): func(getattr(self, val))
for val in ('precision', 'recall', 'fscore',
'samplesize')
for func_name, func in self.print_functions.items()}
self._cache.update(self.meta_data)
def _get_cache(self):
""" Fill the cache (if necessary) and return it"""
if self._cache is None:
self._fill_cache()
return self._cache
def format(self, format_string):
""" Format this object. The format string can contain all variables,
e.g. fscore_avg, precision_values or any item from metadata"""
return format_string.format(**self._get_cache())
class MorfessorEvaluation(object):
""" Do the evaluation of one model, on one testset. The basic procedure is
to create, in a stable manner, a number of samples and evaluate them
independently. The stable selection of samples makes it possible to use
the resulting values for Pair-wise statistical significance testing.
reference_annotations is a standard annotation dictionary:
{compound => ([annoation1],.. ) }
"""
def __init__(self, reference_annotations):
self.reference = {}
for compound, analyses in reference_annotations.items():
self.reference[compound] = list(
tuple(self._segmentation_indices(a)) for a in analyses)
self._samples = {}
def _create_samples(self, configuration=EvaluationConfig(10, 1000)):
"""Create, in a stable manner, n testsets of size x as defined in
test_configuration
"""
#TODO: What is a reasonable limit to warn about a too small testset?
if len(self.reference) < (configuration.num_samples *
configuration.sample_size):
            _logger.warning("The test set is too small for this sample size")
compound_list = sorted(self.reference.keys())
self._samples[configuration] = [
_sample(compound_list, configuration.sample_size, i) for i in
range(configuration.num_samples)]
def get_samples(self, configuration=EvaluationConfig(10, 1000)):
"""Get a list of samples. A sample is a list of compounds.
This method is stable, so each time it is called with a specific
test_set and configuration it will return the same samples. Also this
method caches the samples in the _samples variable.
"""
        if configuration not in self._samples:
self._create_samples(configuration)
return self._samples[configuration]
def _evaluate(self, prediction):
"""Helper method to get the precision and recall of 1 sample"""
def calc_prop_distance(ref, pred):
if len(ref) == 0:
return 1.0
diff = len(set(ref) - set(pred))
return (len(ref) - diff) / float(len(ref))
wordlist = sorted(set(prediction.keys()) & set(self.reference.keys()))
recall_sum = 0.0
precis_sum = 0.0
for word in wordlist:
if len(word) < 2:
continue
recall_sum += max(calc_prop_distance(r, p)
for p, r in product(prediction[word],
self.reference[word]))
precis_sum += max(calc_prop_distance(p, r)
for p, r in product(prediction[word],
self.reference[word]))
precision = precis_sum / len(wordlist)
recall = recall_sum / len(wordlist)
f_score = 2.0 / (1.0 / precision + 1.0 / recall)
return precision, recall, f_score, len(wordlist)
@staticmethod
def _segmentation_indices(annotation):
"""Method to transform a annotation into a tuple of split indices"""
cur_len = 0
for a in annotation[:-1]:
cur_len += len(a)
yield cur_len
def evaluate_model(self, model, configuration=EvaluationConfig(10, 1000),
meta_data=None):
"""Get the prediction of the test samples from the model and do the
evaluation
The meta_data object has preferably at least the key 'name'.
"""
if meta_data is None:
meta_data = {'name': 'UNKNOWN'}
mer = MorfessorEvaluationResult(meta_data)
for i, sample in enumerate(self.get_samples(configuration)):
_logger.debug("Evaluating sample {}".format(i))
prediction = {}
for compound in sample:
prediction[compound] = [tuple(self._segmentation_indices(
model.viterbi_segment(compound)[0]))]
mer.add_data_point(*self._evaluate(prediction))
return mer
def evaluate_segmentation(self, segmentation,
configuration=EvaluationConfig(10, 1000),
meta_data=None):
"""Method for evaluating an existing segmentation"""
def merge_constructions(constructions):
compound = constructions[0]
for i in range(1, len(constructions)):
compound = compound + constructions[i]
return compound
segmentation = {merge_constructions(x[1]):
[tuple(self._segmentation_indices(x[1]))]
for x in segmentation}
if meta_data is None:
meta_data = {'name': 'UNKNOWN'}
mer = MorfessorEvaluationResult(meta_data)
for i, sample in enumerate(self.get_samples(configuration)):
_logger.debug("Evaluating sample {}".format(i))
prediction = {k: v for k, v in segmentation.items() if k in sample}
mer.add_data_point(*self._evaluate(prediction))
return mer
class WilcoxonSignedRank(object):
"""Class for doing statistical signficance testing with the Wilcoxon
Signed-Rank test
It implements the Pratt method for handling zero-differences and
applies a 0.5 continuity correction for the z-statistic.
"""
@staticmethod
def _wilcoxon(d, method='pratt', correction=True):
if method not in ('wilcox', 'pratt'):
raise ValueError
if method == 'wilcox':
d = list(filter(lambda a: a != 0, d))
count = len(d)
ranks = WilcoxonSignedRank._rankdata([abs(v) for v in d])
rank_sum_pos = sum(r for r, v in zip(ranks, d) if v > 0)
rank_sum_neg = sum(r for r, v in zip(ranks, d) if v < 0)
test = min(rank_sum_neg, rank_sum_pos)
mean = count * (count + 1) * 0.25
stdev = (count*(count + 1) * (2 * count + 1))
# compensate for duplicate ranks
no_zero_ranks = [r for i, r in enumerate(ranks) if d[i] != 0]
stdev -= 0.5 * sum(x * (x*x-1) for x in
collections.Counter(no_zero_ranks).values())
stdev = math.sqrt(stdev / 24.0)
if correction:
correction = +0.5 if test > mean else -0.5
else:
correction = 0
z = (test - mean - correction) / stdev
return 2 * WilcoxonSignedRank._norm_cum_pdf(abs(z))
@staticmethod
def _rankdata(d):
od = collections.Counter()
for v in d:
od[v] += 1
rank_dict = {}
cur_rank = 1
for val, count in sorted(od.items(), key=lambda x: x[0]):
rank_dict[val] = (cur_rank + (cur_rank + count - 1)) / 2
cur_rank += count
return [rank_dict[v] for v in d]
@staticmethod
def _norm_cum_pdf(z):
"""Pure python implementation of the normal cumulative pdf function"""
return 0.5 - 0.5 * math.erf(z / math.sqrt(2))
def significance_test(self, evaluations, val_property='fscore_values',
name_property='name'):
"""Takes a set of evaluations (which should have the same
test-configuration) and calculates the p-value for the Wilcoxon signed
rank test
Returns a dictionary with (name1,name2) keys and p-values as values.
"""
results = {r[name_property]: r[val_property] for r in evaluations}
if any(len(x) < 10 for x in results.values()):
_logger.error("Too small number of samples for the Wilcoxon test")
return {}
p = {}
for r1, r2 in product(results.keys(), results.keys()):
p[(r1, r2)] = self._wilcoxon([v1-v2
for v1, v2 in zip(results[r1],
results[r2])])
return p
@staticmethod
def print_table(results):
"""Nicely format a results table as returned by significance_test"""
names = sorted(set(r[0] for r in results.keys()))
col_width = max(max(len(n) for n in names), 5)
for h in chain([""], names):
print('{:{width}}'.format(h, width=col_width), end='|')
print()
for name in names:
print('{:{width}}'.format(name, width=col_width), end='|')
for name2 in names:
print('{:{width}.5}'.format(results[(name, name2)],
width=col_width), end='|')
print()
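# A minimal usage sketch, not part of the original module: the two result
# dictionaries below are synthetic stand-ins for MorfessorEvaluationResult
# objects, assuming only the 'name' and 'fscore_values' keys that
# significance_test actually reads; real values would come from
# MorfessorEvaluation.evaluate_model or evaluate_segmentation.
def _example_significance_test():
    fake_results = [
        {'name': 'model_a', 'fscore_values': [0.61, 0.63, 0.60, 0.62, 0.64,
                                              0.61, 0.63, 0.62, 0.60, 0.65]},
        {'name': 'model_b', 'fscore_values': [0.58, 0.59, 0.57, 0.60, 0.61,
                                              0.58, 0.59, 0.60, 0.57, 0.62]},
    ]
    wsr = WilcoxonSignedRank()
    # p-values for every ordered pair of names, keyed by (name1, name2)
    p_values = wsr.significance_test(fake_results)
    WilcoxonSignedRank.print_table(p_values)
    return p_values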
|
py | 1a3e609b866f7f7252368534e74d71022c178dfa | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import time
import random
# numpy and random are used throughout this module (np.array, random.shuffle);
# they may also be re-exported by the star import from src.global_vars below,
# but importing them explicitly keeps the module self-contained.
import numpy as np
import jax
import jax.numpy as jnp
import networkx as nx
import src.sign_recovery as sign_recovery
from src.global_vars import *
from src.hyperplane_normal import get_ratios_lstsq
from src.tracker import Logger, Tracker
from src.utils import AcceptableFailure, GatherMoreData, matmul, KnownT, cheat_get_inner_layers, which_is_zero
logger = Logger()
@jax.jit
def process_block(ratios, other_ratios):
"""
Let jax efficiently compute pairwise similarity by blocking things.
"""
differences = jnp.abs(ratios[:, jnp.newaxis, :] - other_ratios[jnp.newaxis, :, :])
differences = differences / jnp.abs(ratios[:, jnp.newaxis, :]) + differences / jnp.abs(
other_ratios[jnp.newaxis, :, :])
close = differences < BLOCK_ERROR_TOL * jnp.log(ratios.shape[1])
pairings = jnp.sum(close, axis=2) >= max(MIN_SAME_SIZE, BLOCK_MULTIPLY_FACTOR * (np.log(ratios.shape[1]) - 2))
return pairings
def graph_solve(all_ratios, all_criticals, expected_neurons, LAYER, debug=False):
# 1. Load the critical points and ratios we precomputed
all_ratios = np.array(all_ratios, dtype=np.float64)
all_ratios_f32 = np.array(all_ratios, dtype=np.float32)
all_criticals = np.array(all_criticals, dtype=np.float64)
# Batch them to be sensibly sized
ratios_group = [all_ratios_f32[i:i + 1000] for i in range(0, len(all_ratios), 1000)]
criticals_group = [all_criticals[i:i + 1000] for i in range(0, len(all_criticals), 1000)]
# 2. Compute the similarity pairwise between the ratios we've computed
logger.log("Go up to", len(criticals_group), level=Logger.INFO)
now = time.time()
all_pairings = [[] for _ in range(sum(map(len, ratios_group)))]
for batch_index, (criticals, ratios) in enumerate(zip(criticals_group, ratios_group)):
logger.log(batch_index, level=Logger.INFO)
# Compute the all-pairs similarity
axis = list(range(all_ratios.shape[1]))
random.shuffle(axis)
axis = axis[:20]
for dim in axis:
# We may have an error on one of the directions, so let's try all of them
scaled_all_ratios = all_ratios_f32 / all_ratios_f32[:, dim:dim + 1]
scaled_ratios = ratios / ratios[:, dim:dim + 1]
batch_pairings = process_block(scaled_ratios, scaled_all_ratios)
# To get the offset, Compute the cumsum of the length up to batch_index
batch_offset = sum(map(len, ratios_group[:batch_index]))
# And now create the graph matching ratios that are similar
for this_batch_i, global_j in zip(*np.nonzero(np.array(batch_pairings))):
all_pairings[this_batch_i + batch_offset].append(global_j)
print(time.time() - now)
graph = nx.Graph()
# Add the edges to the graph, removing self-loops
graph.add_edges_from([(i, j) for i, js in enumerate(all_pairings) for j in js if abs(i - j) > 1])
components = list(nx.connected_components(graph))
sorted_components = sorted(components, key=lambda x: -len(x))
if CHEATING:
logger.log('Total (unmatched) examples found:',
sorted(collections.Counter(which_is_zero(LAYER, cheat_get_inner_layers(all_criticals))).items()),
level=Logger.INFO)
if len(components) == 0:
logger.log('No components found', level=Logger.ERROR)
raise AcceptableFailure()
logger.log("Graph search found", len(components), "different components with the following counts",
list(map(len, sorted_components)), level=Logger.INFO)
if CHEATING:
which_neurons = [
collections.Counter(which_is_zero(LAYER, cheat_get_inner_layers(all_criticals[list(orig_component)]))) for
orig_component in sorted_components]
first_index_of = [-1] * expected_neurons
for i, items in enumerate(which_neurons):
for item in items.keys():
if first_index_of[item] == -1:
first_index_of[item] = i
logger.log('These components correspond to', which_neurons, level=Logger.INFO)
logger.log('With the corresponding index in the list:', first_index_of, level=Logger.INFO)
previous_num_components = np.inf
while previous_num_components > len(sorted_components):
previous_num_components = len(sorted_components)
candidate_rows = []
candidate_components = []
datas = [all_ratios[list(component)] for component in sorted_components]
results = pool[0].map(ratio_normalize, datas)
candidate_rows = [x[0] for x in results]
candidate_components = sorted_components
candidate_rows = np.array(candidate_rows)
new_pairings = [[] for _ in range(len(candidate_rows))]
# Re-do the pairings
for dim in range(all_ratios.shape[1]):
scaled_ratios = candidate_rows / candidate_rows[:, dim:dim + 1]
batch_pairings = process_block(scaled_ratios, scaled_ratios)
# And now create the graph matching ratios that are similar
for this_batch_i, global_j in zip(*np.nonzero(np.array(batch_pairings))):
new_pairings[this_batch_i].append(global_j)
graph = nx.Graph()
# Add the edges to the graph, ALLOWING self-loops this time
graph.add_edges_from([(i, j) for i, js in enumerate(new_pairings) for j in js])
components = list(nx.connected_components(graph))
components = [sum([list(candidate_components[y]) for y in comp], []) for comp in components]
sorted_components = sorted(components, key=lambda x: -len(x))
logger.log("After re-doing the graph, the component counts is", len(components), "with items",
list(map(len, sorted_components)), level=Logger.INFO)
if CHEATING:
which_neurons = [
collections.Counter(which_is_zero(LAYER, cheat_get_inner_layers(all_criticals[list(orig_component)])))
for orig_component in sorted_components]
first_index_of = [-1] * expected_neurons
for i, items in enumerate(which_neurons):
for item in items.keys():
if first_index_of[item] == -1:
first_index_of[item] = i
logger.log('Corresponding to', which_neurons, level=Logger.INFO)
logger.log("First index:", first_index_of, level=Logger.INFO)
logger.log("Expected neurons", expected_neurons, level=Logger.INFO)
logger.log("Processing each connected component in turn.", level=Logger.INFO)
resulting_examples = []
resulting_rows = []
skips_because_of_nan = 0
failure = None
for c_count, component in enumerate(sorted_components):
if debug:
logger.log("\n", level=Logger.DEBUG)
if c_count >= expected_neurons:
logger.log("WARNING: This one might be a duplicate!", level=Logger.DEBUG)
logger.log("On component", c_count, "with indexs", component, level=Logger.INFO)
if debug and CHEATING:
inner = cheat_get_inner_layers(all_criticals[list(component)])
logger.log('Corresponding to (cheating) ', which_is_zero(LAYER, inner), level=Logger.DEBUG)
possible_matrix_rows = all_ratios[list(component)]
guessed_row, normalize_axis, normalize_error = ratio_normalize(possible_matrix_rows)
logger.log('The guessed error in the computation is', normalize_error, 'with', len(component), 'witnesses',
level=Logger.INFO)
if normalize_error > .01 and len(component) <= 5:
logger.log("Component size less than 5 with high error; this isn't enough to be sure",
level=Logger.INFO)
continue
logger.log("Normalize on axis", normalize_axis, level=Logger.INFO)
if len(resulting_rows):
scaled_resulting_rows = np.array(resulting_rows)
# print(scaled_resulting_rows.shape)
scaled_resulting_rows /= scaled_resulting_rows[:, normalize_axis:normalize_axis + 1]
delta = np.abs(scaled_resulting_rows - guessed_row[np.newaxis, :])
if min(np.nanmax(delta, axis=1)) < 1e-2:
logger.log("Likely have found this node before", level=Logger.ERROR)
raise AcceptableFailure()
if CHEATING:
# Check our work against the ground truth entries in the corresponding matrix
layers = cheat_get_inner_layers(all_criticals[list(component)[0]])
layer_vals = [np.min(np.abs(x)) for x in layers]
which_layer = np.argmin(layer_vals)
M = A[which_layer]
which_neuron = which_is_zero(which_layer, layers)
logger.log("Neuron corresponds to", which_neuron, level=Logger.INFO)
if which_layer != LAYER:
which_neuron = 0
normalize_axis = 0
actual_row = M[:, which_neuron] / M[normalize_axis, which_neuron]
actual_row = actual_row[:guessed_row.shape[0]]
do_print_err = np.any(np.isnan(guessed_row))
if which_layer == LAYER:
error = np.max(np.abs(np.abs(guessed_row) - np.abs(actual_row)))
else:
error = 1e6
logger.log('max error', "%0.8f" % error, len(component), level=Logger.INFO)
if (error > 1e-4 * len(guessed_row) and debug) or do_print_err:
logger.log('real ', " ".join("%2.3f" % x for x in actual_row), level=Logger.INFO)
logger.log('guess', " ".join("%2.3f" % x for x in guessed_row), level=Logger.INFO)
logger.log('gap', " ".join("%2.3f" % (np.abs(x - y)) for x, y in zip(guessed_row, actual_row)),
level=Logger.INFO)
logger.log("--", level=Logger.INFO)
for row in possible_matrix_rows:
logger.log('posbl', " ".join("%2.3f" % x for x in row / row[normalize_axis]), level=Logger.INFO)
logger.log("--", level=Logger.INFO)
scale = 10 ** int(np.round(np.log(np.nanmedian(np.abs(possible_matrix_rows))) / np.log(10)))
possible_matrix_rows /= scale
for row in possible_matrix_rows:
logger.log('posbl', " ".join("%2.3f" % x for x in row), level=Logger.INFO)
if np.any(np.isnan(guessed_row)) and c_count < expected_neurons:
logger.log("Got NaN, need more data", len(component) / sum(map(len, components)), 1 / sizes[LAYER + 1],
level=Logger.INFO)
if len(component) >= 3:
if c_count < expected_neurons:
failure = GatherMoreData([all_criticals[x] for x in component])
skips_because_of_nan += 1
continue
guessed_row[np.isnan(guessed_row)] = 0
if c_count < expected_neurons and len(component) >= 3:
resulting_rows.append(guessed_row)
resulting_examples.append([all_criticals[x] for x in component])
else:
logger.log("Don't add it to the set", level=Logger.INFO)
# We set failure when something went wrong but we want to defer crashing
# (so that we can use the partial solution)
if len(resulting_rows) + skips_because_of_nan < expected_neurons and len(all_ratios) < DEAD_NEURON_THRESHOLD:
logger.log("We have not explored all neurons. Do more random search", len(resulting_rows), skips_because_of_nan,
expected_neurons, level=Logger.INFO)
raise AcceptableFailure(partial_solution=(np.array(resulting_rows), np.array(resulting_examples)))
else:
logger.log("At this point, we just assume the neuron must be dead", level=Logger.INFO)
while len(resulting_rows) < expected_neurons:
resulting_rows.append(np.zeros_like((resulting_rows[0])))
resulting_examples.append([np.zeros_like(resulting_examples[0][0])])
# Here we know it's a GatherMoreData failure, but we want to only do this
# if there was enough data for everything else
if failure is not None:
logger.log("Need to raise a previously generated failure.", level=Logger.INFO)
raise failure
logger.log("Successfully returning a solution attempt.\n", level=Logger.INFO)
return resulting_examples, resulting_rows
def ratio_normalize(possible_matrix_rows):
# We get a set of a bunch of numbers
# a1 b1 c1 d1 e1 f1 g1
# a2 b2 c2 d2 e2 f2 g2
# such that some of them are nan
# We want to compute the pairwise ratios ignoring the nans
now = time.time()
ratio_evidence = [[[] for _ in range(possible_matrix_rows.shape[1])] for _ in range(possible_matrix_rows.shape[1])]
for row in possible_matrix_rows:
for i in range(len(row)):
for j in range(len(row)):
ratio_evidence[i][j].append(row[i] / row[j])
if len(ratio_evidence) > 100:
ratio_evidence = np.array(ratio_evidence, dtype=np.float32)
else:
ratio_evidence = np.array(ratio_evidence, dtype=np.float64)
medians = np.nanmedian(ratio_evidence, axis=2)
errors = np.nanstd(ratio_evidence, axis=2) / np.sum(~np.isnan(ratio_evidence), axis=2) ** .5
errors += 1e-2 * (np.sum(~np.isnan(ratio_evidence), axis=2) == 1)
errors /= np.abs(medians)
errors[np.isnan(errors)] = 1e6
ratio_evidence = medians
last_nan_count = 1e8
last_total_cost = 1e8
while (np.sum(np.isnan(ratio_evidence)) < last_nan_count or last_total_cost < np.sum(errors) * .9) and False:
last_nan_count = np.sum(np.isnan(ratio_evidence))
last_total_cost = np.sum(errors)
logger.log(".", level=Logger.INFO)
logger.log("Takenc", time.time() - now, level=Logger.INFO)
logger.log('nan count', last_nan_count, level=Logger.INFO)
logger.log('total cost', last_total_cost, level=Logger.INFO)
cost_i_over_j = ratio_evidence[:, :, np.newaxis]
cost_j_over_k = ratio_evidence
cost_i_over_k = cost_i_over_j * cost_j_over_k
del cost_i_over_j, cost_j_over_k
logger.log(cost_i_over_k.shape, cost_i_over_k.dtype, level=Logger.INFO)
error_i_over_j = errors[:, :, np.newaxis]
error_j_over_k = errors
error_i_over_k = error_i_over_j + error_j_over_k
best_indexs = np.nanargmin(error_i_over_k, axis=1)
best_errors = np.nanmin(error_i_over_k, axis=1)
del error_i_over_j, error_j_over_k, error_i_over_k
cost_i_over_k_new = []
for i in range(len(best_indexs)):
cost_i_over_k_new.append(cost_i_over_k[i].T[np.arange(len(best_indexs)), best_indexs[i]])
cost_i_over_k = np.array(cost_i_over_k_new)
which = best_errors < errors
ratio_evidence = cost_i_over_k * which + ratio_evidence * (1 - which)
errors = best_errors
# Choose the column with the fewest nans to return
nancount = np.sum(np.isnan(ratio_evidence), axis=0)
# print("Column nan count", nancount)
column_ok = np.min(nancount) == nancount
best = (None, np.inf)
cost_i_over_j = ratio_evidence[:, :, np.newaxis]
cost_j_over_k = ratio_evidence
cost_i_over_k = cost_i_over_j * cost_j_over_k
cost_i_j_k = cost_i_over_k
# cost from i through j to k
for column in range(len(column_ok)):
if not column_ok[column]:
continue
quality = np.nansum(np.abs(cost_i_j_k[:, column, :] - ratio_evidence))
# print('q',quality)
if quality < best[1]:
best = (column, quality)
column, best_error = best
return ratio_evidence[:, column], column, best_error
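# A minimal sketch, not part of the original file, of what ratio_normalize
# recovers: every row below is a (partially observed, NaN-masked) scalar
# multiple of the same underlying vector [1, 2, 3], and the function returns
# that vector scaled so the entry at the chosen column equals 1.  The numbers
# are made up purely for illustration.
def _example_ratio_normalize():
    rows = np.array([[1.0, 2.0, np.nan],
                     [2.0, 4.0, 6.0],
                     [np.nan, 3.0, 4.5]])
    ratios, axis, err = ratio_normalize(rows)
    # ratios is approximately [1.0, 2.0, 3.0] (relative to column `axis`)
    return ratios, axis, err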
def gather_ratios(critical_points, known_T, check_fn, LAYER, COUNT):
this_layer_critical_points = []
logger.log("Gathering", COUNT, "critical points", level=Logger.INFO)
for point in critical_points:
if LAYER > 0:
if any(np.any(np.abs(x) < 1e-5) for x in known_T.get_hidden_layers(point)):
continue
if CHEATING:
if np.any(np.abs(cheat_get_inner_layers(point)[0]) < 1e-10):
logger.log(cheat_get_inner_layers(point), level=Logger.INFO)
logger.log("Looking at one I don't need to", level=Logger.INFO)
if LAYER > 0 and np.sum(known_T.forward(point) != 0) <= 1:
logger.log("Not enough hidden values are active to get meaningful data", level=Logger.INFO)
continue
if not check_fn(point):
# print("Check function rejected it")
continue
if CHEATING:
logger.log("What layer is this neuron on (by cheating)?",
[(np.min(np.abs(x)), np.argmin(np.abs(x))) for x in cheat_get_inner_layers(point)],
level=Logger.INFO)
tmp = Tracker().query_count
for EPS in [GRAD_EPS, GRAD_EPS / 10, GRAD_EPS / 100]:
try:
normal = get_ratios_lstsq(LAYER, [point], [range(DIM)], known_T, eps=EPS)[0].flatten()
# normal = get_ratios([point], [range(DIM)], eps=EPS)[0].flatten()
break
except AcceptableFailure:
logger.log("Try again with smaller eps", level=Logger.INFO)
pass
# print("LSTSQ Delta queries", query_count-tmp)
this_layer_critical_points.append((normal, point))
# coupon collector: we need nlogn points.
logger.log("Up to", len(this_layer_critical_points), 'of', COUNT, level=Logger.INFO)
if len(this_layer_critical_points) >= COUNT:
break
return this_layer_critical_points
def compute_layer_values(critical_points, known_T, LAYER):
if LAYER == 0:
COUNT = neuron_count[LAYER + 1] * 3
else:
COUNT = neuron_count[LAYER + 1] * np.log(sizes[LAYER + 1]) * 3
# type: [(ratios, critical_point)]
this_layer_critical_points = []
partial_weights = None
partial_biases = None
def check_fn(point):
if partial_weights is None:
return True
hidden = matmul(known_T.forward(point, with_relu=True), partial_weights.T, partial_biases)
if np.any(np.abs(hidden) < 1e-4):
return False
return True
logger.log("", level=Logger.INFO)
logger.log("Start running critical point search to find neurons on layer", LAYER, level=Logger.INFO)
while True:
logger.log("At this iteration I have", len(this_layer_critical_points), "critical points", level=Logger.INFO)
def reuse_critical_points():
for witness in critical_points:
yield witness
this_layer_critical_points.extend(gather_ratios(reuse_critical_points(), known_T, check_fn,
LAYER, COUNT))
logger.log("Query count after that search:", Tracker().query_count, level=Logger.INFO)
logger.log("And now up to ", len(this_layer_critical_points), "critical points", level=Logger.INFO)
## filter out duplicates
filtered_points = []
        # Let's not add points that are identical to ones we've already done.
for i, (ratio1, point1) in enumerate(this_layer_critical_points):
for ratio2, point2 in this_layer_critical_points[i + 1:]:
if np.sum((point1 - point2) ** 2) ** .5 < 1e-10:
break
else:
filtered_points.append((ratio1, point1))
this_layer_critical_points = filtered_points
logger.log("After filtering duplicates we're down to ", len(this_layer_critical_points), "critical points",
level=Logger.INFO)
logger.log("Start trying to do the graph solving", level=Logger.INFO)
try:
critical_groups, extracted_normals = graph_solve([x[0] for x in this_layer_critical_points],
[x[1] for x in this_layer_critical_points],
neuron_count[LAYER + 1],
LAYER=LAYER,
debug=True)
break
except GatherMoreData as e:
logger.log("Graph solving failed because we didn't explore all sides of at least one neuron",
level=Logger.INFO)
logger.log("Fall back to the hyperplane following algorithm in order to get more data", level=Logger.INFO)
def mine(r):
while len(r) > 0:
logger.log("Yielding a point", level=Logger.INFO)
yield r[0]
r = r[1:]
logger.log("No more to give!", level=Logger.INFO)
prev_T = KnownT(known_T.A[:-1], known_T.B[:-1])
_, more_critical_points = sign_recovery.solve_layer_sign(prev_T, known_T.A[-1], known_T.B[-1], mine(e.data),
LAYER - 1, already_checked_critical_points=True,
only_need_positive=True)
logger.log("Add more", len(more_critical_points), level=Logger.INFO)
this_layer_critical_points.extend(gather_ratios(more_critical_points, known_T, check_fn,
LAYER, 1e6))
logger.log("Done adding", level=Logger.INFO)
COUNT = neuron_count[LAYER + 1]
except AcceptableFailure as e:
logger.log("Graph solving failed; get more points", level=Logger.INFO)
COUNT = neuron_count[LAYER + 1]
if 'partial_solution' in dir(e):
if len(e.partial_solution[0]) > 0:
partial_weights, corresponding_examples = e.partial_solution
logger.log("Got partial solution with shape", partial_weights.shape, level=Logger.INFO)
if CHEATING:
logger.log("Corresponding to",
np.argmin(
np.abs(cheat_get_inner_layers([x[0] for x in corresponding_examples])[LAYER]),
axis=1), level=Logger.INFO)
partial_biases = []
for weight, examples in zip(partial_weights, corresponding_examples):
hidden = known_T.forward(examples, with_relu=True)
logger.log("hidden", np.array(hidden).shape, level=Logger.INFO)
bias = -np.median(np.dot(hidden, weight))
partial_biases.append(bias)
partial_biases = np.array(partial_biases)
logger.log("Number of critical points per cluster", [len(x) for x in critical_groups], level=Logger.INFO)
point_per_class = [x[0] for x in critical_groups]
extracted_normals = np.array(extracted_normals).T
# Compute the bias because we know wx+b=0
extracted_bias = [matmul(known_T.forward(point_per_class[i], with_relu=True), extracted_normals[:, i], c=None) for i
in range(neuron_count[LAYER + 1])]
# Don't forget to negate it.
# That's important.
# No, I definitely didn't forget this line the first time around.
extracted_bias = -np.array(extracted_bias)
# For the failed-to-identify neurons, set the bias to zero
extracted_bias *= np.any(extracted_normals != 0, axis=0)[:, np.newaxis]
if CHEATING:
# Compute how far we off from the true matrix
real_scaled = A[LAYER] / A[LAYER][0]
extracted_scaled = extracted_normals / extracted_normals[0]
mask = []
reorder_rows = []
for i in range(len(extracted_bias)):
which_idx = np.argmin(np.sum(np.abs(real_scaled - extracted_scaled[:, [i]]), axis=0))
reorder_rows.append(which_idx)
mask.append((A[LAYER][0, which_idx]))
logger.log('matrix norm difference', np.sum(np.abs(extracted_normals * mask - A[LAYER][:, reorder_rows])),
level=Logger.INFO)
else:
mask = [1] * len(extracted_bias)
return extracted_normals, extracted_bias, mask
|
py | 1a3e61f4f88e2f89f299c04b73c269df79070590 | from PIL import Image
import pytesseract
import argparse
import os
import time
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", default="post",
help="path of folder with images to be OCR'd, folder should not include another forlders")
args = vars(ap.parse_args())
path=args["path"]+"/"
print(path)
postcoden=["3705"]
brievenAlsText=[]
brievenAlsFoto=os.listdir(path)
print(brievenAlsFoto)
counter=0
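# Note: inside the loop below, the crop-box constants scale a region that was
# defined for a reference scan of roughly 3732x2616 pixels to each image's
# actual size; the img.crop(area) call itself is currently commented out.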
for im in brievenAlsFoto:
img = Image.open(path+im)
size = img.size
BottomL=(size[0]*3132)/3732
BottomH=(size[1]*1616)/2616
TopL=(size[0]*300)/3732
TopH=(size[1]*500)/2616
area = (TopL,TopH,BottomL,BottomH)
# img = img.crop(area)
if counter > 2:
img.show()
brievenAlsText.append(pytesseract.image_to_string(img, lang='eng'))
counter+=1
a=0
found=0
postcode=""
adres=[]
print(brievenAlsText[-2])
print(brievenAlsText[-1])
for text in brievenAlsText:
voriglen=len(adres)
dublicate=0
for letter in text:
try:
if type(eval(letter))==int:
a+=1
postcode+=letter
if a == 4 and postcode in postcoden:
found=1
if dublicate == 2:
found=0
adres.pop(-1)
adres.append("skip")
break
dublicate+=1
continue
except:
a=0
if found==0:
postcode=""
if found <=3 and found!=0:
postcode+=letter
found+=1
if found==4:
text = text.split(postcode)
text[0] = text[0].strip(" ")
text[0] = text[0].split(" ")
huisnummer = text[0][-1]
adres.append(postcode + " " + huisnummer)
text=text[1]
postcode=""
found = 0
dublicate+=1
continue
if len(adres)==voriglen:
adres.append("skip")
for i in range(len(adres)):
adres[i]=adres[i].strip("\n")
if "\n" in adres[i]:
adres[i]=adres[i].split("\n")
adres[i]=adres[i][0]
print(adres)
print(round(time.perf_counter(),2))
|
py | 1a3e621eb31dec15383f049fab54081bf6d1a767 | import inspect
import pytest
import test_aide.equality as eh
from collections import defaultdict
from unittest.mock import _get_target
try:
import pandas as pd
has_pandas = True
except ModuleNotFoundError:
has_pandas = False
try:
import numpy as np
has_numpy = True
except ModuleNotFoundError:
has_numpy = False
# potential functions that test_aide.equality.assert_equal_dispatch can call
potential_assert_functions = [
"test_aide.equality.assert_frame_equal_msg",
"test_aide.equality.assert_series_equal_msg",
"test_aide.equality.assert_index_equal_msg",
"test_aide.equality.assert_list_tuple_equal_msg",
"test_aide.equality.assert_dict_equal_msg",
"test_aide.equality.assert_equal_msg",
"test_aide.equality.assert_np_nan_eqal_msg",
"test_aide.equality.assert_array_equal_msg",
]
def test_arguments():
"""Test arguments for arguments of test_aide.equality.assert_equal_dispatch."""
expected_arguments = ["expected", "actual", "msg"]
arg_spec = inspect.getfullargspec(eh.assert_equal_dispatch)
arguments = arg_spec.args
assert len(expected_arguments) == len(
arguments
), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(arguments)}"
for i, (e, a) in enumerate(zip(expected_arguments, arguments)):
assert e == a, f"Incorrect arg at index {i} -\n Expected: {e}\n Actual: {a}"
default_values = arg_spec.defaults
assert (
default_values is None
), f"Unexpected default values -\n Expected: None\n Actual: {default_values}"
def test_different_types_error():
"""Test that an exception is raised if expected and actual are different types."""
with pytest.raises(TypeError, match="type mismatch"):
eh.assert_equal_dispatch(expected=1, actual=1.0, msg="test_msg")
@pytest.mark.skipif(not has_pandas, reason="pandas not installed")
@pytest.mark.parametrize(
"test_function_call, expected_value, pd_testing_function",
[
# the None if not has_pandas below is to stop pd being accessed before the test is skipped
(
"test_aide.equality.assert_frame_equal_msg",
None if not has_pandas else pd.DataFrame({"a": [1, 2]}),
None if not has_pandas else pd.testing.assert_frame_equal,
),
(
"test_aide.equality.assert_series_equal_msg",
None if not has_pandas else pd.Series([1, 2]),
None if not has_pandas else pd.testing.assert_series_equal,
),
(
"test_aide.equality.assert_index_equal_msg",
None if not has_pandas else pd.Index([1, 2]),
None if not has_pandas else pd.testing.assert_index_equal,
),
],
)
def test_pd_types_correct_function_call(
mocker, test_function_call, expected_value, pd_testing_function
):
"""Test that the correct 'sub' assert function is called if expected for the given input type - and none
of the other functions are called.
"""
# test_function_call is the function to check has been called
# expected_value is the dummy value to use when calling eh.assert_equal_dispatch, so test_function_call will be called
# pd_testing_function is the specific pd.testing function that should be used to compare that type
# patch all the potential functions that can be called by test_aide.equality.assert_equal_dispatch
for x in potential_assert_functions:
mocker.patch(x)
actual_value = expected_value
msg_value = "test_msg"
eh.assert_equal_dispatch(
expected=expected_value, actual=actual_value, msg=msg_value
)
getter, attribute = _get_target(test_function_call)
mocked_function_call = getattr(getter(), attribute)
assert (
mocked_function_call.call_count == 1
), f"Unexpected number of calls to {test_function_call} -\n Expected: 1\n Actual: {mocked_function_call.call_count}"
call_1_args = mocked_function_call.call_args_list[0]
call_1_pos_args = call_1_args[0]
call_1_kwargs = call_1_args[1]
call_1_expected_pos_arg = (expected_value, actual_value, msg_value)
assert len(call_1_pos_args) == len(
call_1_expected_pos_arg
), f"Unexpected number of positional args in call to {test_function_call} -\n Expected: {len(call_1_expected_pos_arg)}\n Actual: {len(call_1_pos_args)}"
pd_testing_function(call_1_expected_pos_arg[0], call_1_pos_args[0])
pd_testing_function(call_1_expected_pos_arg[1], call_1_pos_args[1])
e = call_1_expected_pos_arg[2]
a = call_1_pos_args[2]
assert (
e == a
), f"Unexpected last positional arg in call to {test_function_call} -\n Expected: {e}\n Actual: {a}"
assert (
call_1_kwargs == {}
), f"Unexpected keyword args in call to {test_function_call} -\n Expected: None\n Actual: {call_1_kwargs}"
# get functions that should not have been called
test_functions_not_call = list(
set(potential_assert_functions) - set([test_function_call])
)
# loop through each one and test it has not been called
for test_function_not_call in test_functions_not_call:
getter, attribute = _get_target(test_function_not_call)
mocked_function_not_call = getattr(getter(), attribute)
assert (
mocked_function_not_call.call_count == 0
), f"Unexpected number of calls to {test_function_not_call} -\n Expected: 0\n Actual: {mocked_function_not_call.call_count}"
@pytest.mark.skipif(not has_numpy, reason="numpy not installed")
@pytest.mark.parametrize(
"expected_value",
# the None if not has_numpy below is to stop np being accessed before the test is skipped
[
None if not has_numpy else np.array([]),
None if not has_numpy else np.array([0, 1, 2]),
None if not has_numpy else np.array([[1, 2], [3, 4]]),
None if not has_numpy else np.array([np.nan, np.nan]),
],
)
def test_np_array_correct_function_call(mocker, expected_value):
"""Test that assert_array_equal_msg called correctly when expected is a numpy array"""
# function to check has been called
test_function_call = "test_aide.equality.assert_array_equal_msg"
# patch all the potential functions that can be called by test_aide.equality.assert_equal_dispatch
for x in potential_assert_functions:
mocker.patch(x)
actual_value = expected_value
msg_value = "test_msg"
eh.assert_equal_dispatch(
expected=expected_value, actual=actual_value, msg=msg_value
)
getter, attribute = _get_target(test_function_call)
mocked_function_call = getattr(getter(), attribute)
assert (
mocked_function_call.call_count == 1
), f"Unexpected number of calls to {test_function_call} with {expected_value} -\n Expected: 1\n Actual: {mocked_function_call.call_count}"
call_1_args = mocked_function_call.call_args_list[0]
call_1_pos_args = call_1_args[0]
call_1_kwargs = call_1_args[1]
call_1_expected_pos_arg = (expected_value, actual_value, msg_value)
assert len(call_1_pos_args) == len(
call_1_expected_pos_arg
), f"Unexpected number of positional args in call to {test_function_call} -\n Expected: {len(call_1_expected_pos_arg)}\n Actual: {len(call_1_pos_args)}"
np.testing.assert_array_equal(call_1_expected_pos_arg[0], call_1_pos_args[0])
np.testing.assert_array_equal(call_1_expected_pos_arg[1], call_1_pos_args[1])
e = call_1_expected_pos_arg[2]
a = call_1_pos_args[2]
assert (
e == a
), f"Unexpected last positional arg in call to {test_function_call} -\n Expected: {e}\n Actual: {a}"
assert (
call_1_kwargs == {}
), f"Unexpected keyword args in call to {test_function_call} -\n Expected: None\n Actual: {call_1_kwargs}"
# get functions that should not have been called
test_functions_not_call = list(
set(potential_assert_functions) - set([test_function_call])
)
# loop through each one and test it has not been called
for test_function_not_call in test_functions_not_call:
getter, attribute = _get_target(test_function_not_call)
mocked_function_not_call = getattr(getter(), attribute)
assert (
mocked_function_not_call.call_count == 0
), f"Unexpected number of calls to {test_function_not_call} -\n Expected: 0\n Actual: {mocked_function_not_call.call_count}"
@pytest.mark.parametrize(
"expected_function_called, value_to_pass",
[
("test_aide.equality.assert_list_tuple_equal_msg", [1, 2]),
("test_aide.equality.assert_list_tuple_equal_msg", (1, 2)),
("test_aide.equality.assert_dict_equal_msg", {"a": 1}),
("test_aide.equality.assert_dict_equal_msg", defaultdict(None, {"a": 1})),
("test_aide.equality.assert_equal_msg", 1),
("test_aide.equality.assert_equal_msg", 1.0),
("test_aide.equality.assert_equal_msg", "a"),
("test_aide.equality.assert_equal_msg", False),
("test_aide.equality.assert_equal_msg", None),
],
)
def test_non_dataframe_correct_function_call(
mocker, expected_function_called, value_to_pass
):
"""Test that the correct 'sub' assert function is called if expected for the given input type - and none
of the other functions are called.
"""
# function to check has been called
test_function_call = expected_function_called
# patch all the potential functions that can be called by test_aide.equality.assert_equal_dispatch
for x in potential_assert_functions:
mocker.patch(x)
expected_value = value_to_pass
actual_value = expected_value
msg_value = "test_msg"
eh.assert_equal_dispatch(
expected=expected_value, actual=actual_value, msg=msg_value
)
getter, attribute = _get_target(test_function_call)
mocked_function_call = getattr(getter(), attribute)
assert (
mocked_function_call.call_count == 1
), f"Unexpected number of calls to {test_function_call} with {value_to_pass} -\n Expected: 1\n Actual: {mocked_function_call.call_count}"
call_1_args = mocked_function_call.call_args_list[0]
call_1_pos_args = call_1_args[0]
call_1_kwargs = call_1_args[1]
call_1_expected_pos_arg = (expected_value, actual_value, msg_value)
assert len(call_1_pos_args) == len(
call_1_expected_pos_arg
), f"Unexpected number of positional args in call to {test_function_call} -\n Expected: {len(call_1_expected_pos_arg)}\n Actual: {len(call_1_pos_args)}"
for i, (e, a) in enumerate(zip(call_1_expected_pos_arg, call_1_pos_args)):
assert (
e == a
), f"Unexpected positional arg in index {i} in call to {test_function_call} -\n Expected: {e}\n Actual: {a}"
assert (
call_1_kwargs == {}
), f"Unexpected keyword args in call to {test_function_call} -\n Expected: None\n Actual: {call_1_kwargs}"
# get functions that should not have been called
test_functions_not_call = list(
set(potential_assert_functions) - set([test_function_call])
)
# loop through each one and test it has not been called
for test_function_not_call in test_functions_not_call:
getter, attribute = _get_target(test_function_not_call)
mocked_function_not_call = getattr(getter(), attribute)
assert (
mocked_function_not_call.call_count == 0
), f"Unexpected number of calls to {test_function_not_call} -\n Expected: 0\n Actual: {mocked_function_not_call.call_count}"
@pytest.mark.skipif(not has_numpy, reason="numpy not installed")
def test_nan_correct_function_call(mocker):
"""Test that the correct 'sub' assert function is called as expected if the input
type is np.NaN - and none of the other functions are called.
"""
# function to check has been called
test_function_call = "test_aide.equality.assert_np_nan_eqal_msg"
# patch all the potential functions that can be called by test_aide.equality.assert_equal_dispatch
for x in potential_assert_functions:
mocker.patch(x)
expected_value = np.NaN
actual_value = expected_value
msg_value = "test_msg"
eh.assert_equal_dispatch(
expected=expected_value, actual=actual_value, msg=msg_value
)
getter, attribute = _get_target(test_function_call)
mocked_function_call = getattr(getter(), attribute)
assert (
mocked_function_call.call_count == 1
), f"Unexpected number of calls to {test_function_call} with {expected_value} -\n Expected: 1\n Actual: {mocked_function_call.call_count}"
call_1_args = mocked_function_call.call_args_list[0]
call_1_pos_args = call_1_args[0]
call_1_kwargs = call_1_args[1]
call_1_expected_pos_arg = (expected_value, actual_value, msg_value)
assert len(call_1_pos_args) == len(
call_1_expected_pos_arg
), f"Unexpected number of positional args in call to {test_function_call} -\n Expected: {len(call_1_expected_pos_arg)}\n Actual: {len(call_1_pos_args)}"
for i, (e, a) in enumerate(zip(call_1_expected_pos_arg, call_1_pos_args)):
if type(e) is float and np.isnan(e):
assert np.isnan(e) and np.isnan(
a
), f"Unexpected positional arg in index {i} in call to {test_function_call} -\n Expected: {e}\n Actual: {a}"
else:
assert (
e == a
), f"Unexpected positional arg in index {i} in call to {test_function_call} -\n Expected: {e}\n Actual: {a}"
assert (
call_1_kwargs == {}
), f"Unexpected keyword args in call to {test_function_call} -\n Expected: None\n Actual: {call_1_kwargs}"
# get functions that should not have been called
test_functions_not_call = list(
set(potential_assert_functions) - set([test_function_call])
)
# loop through each one and test it has not been called
for test_function_not_call in test_functions_not_call:
getter, attribute = _get_target(test_function_not_call)
mocked_function_not_call = getattr(getter(), attribute)
assert (
mocked_function_not_call.call_count == 0
), f"Unexpected number of calls to {test_function_not_call} -\n Expected: 0\n Actual: {mocked_function_not_call.call_count}"
def test_other_type_equality_checked():
"""Test if types not directly checked for in assert_equal_dispatch are passed then they are compared using ==."""
try:
eh.assert_equal_dispatch(
expected=set(["a"]), actual=set(["a"]), msg="test message b"
)
except Exception as err:
pytest.fail(
f"eh.assert_equal_dispatch failed for equal sets call with error; {err}"
)
with pytest.raises(AssertionError, match="test message"):
eh.assert_equal_dispatch(
expected=set(["a"]), actual=set(["a", "b"]), msg="test message"
)
|
py | 1a3e635b5855033bd6df805b3260ecf80759b854 | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.BoardingPermissionPanel
from pandac.PandaModules import *
from direct.gui.DirectGui import DGG
from pirates.piratesgui.BorderFrame import BorderFrame
from pirates.piratesgui.GuiPanel import GuiPanel
from pirates.piratesgui.GuiButton import GuiButton
from pirates.piratesgui.DialogButton import DialogButton
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.CheckButton import CheckButton
from pirates.piratesbase import PiratesGlobals
class BoardingPermissionPanel(GuiPanel):
__module__ = __name__
def __init__(self, parent, *args, **kw):
self.guiSetup = False
optiondefs = (('parent', parent, None), ('pos', (-0.58, 0, -0.09), None), ('command', None, None), ('extraArgs', [], None), ('ownShip', 0, None))
self.defineoptions(kw, optiondefs)
GuiPanel.__init__(self, title=PLocalizer.BoardPermTitle, h=0.8, w=0.5, titleSize=1.5, showClose=False, **kw)
self.initialiseoptions(BoardingPermissionPanel)
self.titleLabel['text_align'] = TextNode.ACenter
self.titleLabel.setPos(0.23, 0, 0.72)
self.setupGui()
return
def destroy(self):
self.button = None
self.background = None
self.friendsButton = None
self.crewButton = None
self.guildButton = None
self.publicButton = None
GuiPanel.destroy(self)
return
def setupGui(self):
self.destroyGui()
if not self.guiSetup:
self.button = DialogButton(parent=self, buttonStyle=DialogButton.NO, pos=(0.25,
0,
0.08), text=PLocalizer.lClose, helpPos=(-0.4, 0, 0.03), helpDelay=0.3, command=self['command'], extraArgs=self['extraArgs'])
self.background = BorderFrame(parent=self, pos=(0.05, 0, 0.05), frameSize=[0.0, 0.4, 0.1, 0.6], bgColorScale=VBase4(0, 0, 0, 0.75), bgTransparency=1, flatten=0)
if self['ownShip']:
state = DGG.NORMAL
else:
state = DGG.DISABLED
ship = localAvatar.getShip()
if ship:
friendState = ship.getAllowFriendState()
crewState = ship.getAllowCrewState()
guildState = ship.getAllowGuildState()
publicState = ship.getAllowPublicState()
else:
friendState = 0
crewState = 0
guildState = 0
publicState = 0
buttonOptions = {'parent': self.background, 'state': state, 'relief': None, 'pos': (0.06, 0, 0.53), 'scale': 0.3, 'text': PLocalizer.CrewBoardingAccessAllowFriends, 'value': friendState, 'text_pos': (0.167, -0.06, 0), 'text0_fg': PiratesGuiGlobals.TextFG1, 'text1_fg': PiratesGuiGlobals.TextFG1, 'text2_fg': PiratesGuiGlobals.TextFG1, 'text3_fg': PiratesGuiGlobals.TextFG9, 'text_font': PiratesGlobals.getInterfaceFont(), 'text_scale': 0.15, 'text_shadow': (0, 0, 0, 1), 'text_align': TextNode.ALeft, 'command': self.allowFriends}
self.friendsButton = CheckButton(**buttonOptions)
buttonOptions['text'] = PLocalizer.CrewBoardingAccessAllowCrew
buttonOptions['pos'] = (buttonOptions['pos'][0], buttonOptions['pos'][1], buttonOptions['pos'][2] - 0.12)
buttonOptions['command'] = self.allowCrew
buttonOptions['value'] = crewState
self.crewButton = CheckButton(**buttonOptions)
buttonOptions['text'] = PLocalizer.CrewBoardingAccessAllowGuild
buttonOptions['pos'] = (buttonOptions['pos'][0], buttonOptions['pos'][1], buttonOptions['pos'][2] - 0.12)
buttonOptions['command'] = self.allowGuild
buttonOptions['value'] = guildState
self.guildButton = CheckButton(**buttonOptions)
buttonOptions['text'] = PLocalizer.CrewBoardingAccessAllowPublic
buttonOptions['pos'] = (buttonOptions['pos'][0], buttonOptions['pos'][1], buttonOptions['pos'][2] - 0.12)
buttonOptions['command'] = self.allowPublic
buttonOptions['value'] = publicState
self.publicButton = CheckButton(**buttonOptions)
self.guiSetup = True
return
def destroyGui(self):
if self.guiSetup:
self.background.destroy()
self.background = None
self.friendsButton.destroy()
self.friendsButton = None
self.crewButton.destroy()
self.crewButton = None
self.guildButton.destroy()
self.guildButton = None
self.publicButton.destroy()
self.publicButton = None
self.button.destroy()
self.button = None
self.guiSetup = False
return
def allowFriends(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowFriendState(allow)
def allowCrew(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowCrewState(allow)
def allowGuild(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowGuildState(allow)
def allowPublic(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowPublicState(allow)
def setAllowFriends(self, allow):
self.friendsButton['value'] = allow
def setAllowCrew(self, allow):
self.crewButton['value'] = allow
def setAllowGuild(self, allow):
self.guildButton['value'] = allow
def setAllowPublic(self, allow):
self.publicButton['value'] = allow |
py | 1a3e64d4c9ed583c7491079cdde0158f3b90fe9c | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-03 15:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Boss',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='/boss/')),
('name', models.CharField(blank=True, max_length=100)),
('lvl', models.IntegerField(blank=True, null=True)),
('health', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='DifficultType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='Raid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100)),
('lvl', models.IntegerField(blank=True, null=True)),
('difficult_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.DifficultType')),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Game')),
],
),
migrations.CreateModel(
name='RaidGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Game')),
],
),
migrations.CreateModel(
name='RaidGroupAvaliable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.BooleanField(default=False)),
('execution_date', models.DateField(blank=True, null=True)),
('boss', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Boss')),
('difficult_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.DifficultType')),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Game')),
('raid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Raid')),
('raid_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.RaidGroup')),
],
),
migrations.AddField(
model_name='raidgroup',
name='raids',
field=models.ManyToManyField(through='raid.RaidGroupAvaliable', to='raid.Raid'),
),
migrations.AddField(
model_name='boss',
name='difficult_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.DifficultType'),
),
]
|
py | 1a3e656693ac2d53648f435ad47a133f2ee184fd | #{{{ Import
import numpy as np
pi = np.pi
#}}}
#{{{ Snell's Law
def deflection_angle(theta, n1, n2, deg=True):
"""Calculate deflection angle according to Snell's law.
Parameters
----------
theta : float
Angle of incidence.
n1 : float
Refractive index of the first medium.
n2 : float
Refraction index of the second medium.
deg : boolean, optional
True if theta is specified in degrees.
"""
if deg:
factor = pi/180.0
else:
factor = 1.0
return np.arcsin(n1*np.sin(theta*factor)/n2)/factor
#}}}
#{{{ Geometry utilities
#{{{ line_plane_intersection
def line_plane_intersection(pos,
dirVect,
plane_center,
normalVector,
diameter):
'''
Compute the intersection point between a line
and a plane
Parameters
----------
pos : array
The position of the end point of the line.
dirVert : array
The directional vector specifying the line.
plane_center : array
The position of the center of the plane.
normalVector: array
The normal vector of the plane.
diameter: float
The diameter of the plane.
Returns
-------
dict
The returned value is a dictionary with the following keys:
"Intersection Point": numpy array of the coordinates of the intersection point.
"isHit": A boolean value of whether the line intersects with the plane or not.
"distance": Distance between the origin of the line and the intersection point.
"distance from center": Distance between the center of the plane and the intersection point.
'''
#Make sure the inputs are ndarrays
pos = np.array(pos, dtype=np.float64)
dirVect = np.array(dirVect, dtype=np.float64)
plane_center = np.array(plane_center, dtype=np.float64)
normalVector = np.array(normalVector, dtype=np.float64)
diameter = float(diameter)
#Get a normalized vector along the plane
plVect = np.array([-normalVector[1], normalVector[0]])
plVect = plVect/np.linalg.norm(plVect)
#Normalize
dirVect = dirVect/np.linalg.norm(dirVect)
#Make sure that the plVect and dirVect are not parallel
if np.abs(np.dot(dirVect, plVect)) > 1 - 1e-10:
return {'Intersection Point': np.array((0.,0.)), 'isHit': False,
'distance': 0.0,
'distance from center': 0.0}
#Solve line equations to get the intersection point
M = np.vstack((dirVect, -plVect)).T
ans = np.linalg.solve(M, plane_center - pos)
intersection_point = pos + ans[0]*dirVect
#How far the intersection point is from the center
#of the plane
dist_from_center = np.abs(ans[1])
if dist_from_center > diameter/2.0\
or ans[0] < 0.\
or np.dot(dirVect, normalVector) > 0.:
hit = False
else:
hit = True
return {'Intersection Point': intersection_point, 'isHit': hit,
'distance': np.abs(ans[0]),
'distance from center': ans[1]}
#}}}
#{{{ line_arc_intersection
def line_arc_intersection(pos,
dirVect,
chord_center,
chordNormVect,
invROC,
diameter,
verbose=False):
'''
Compute the intersection point between a line
and an arc.
Parameters
----------
pos : array
Origin of the line.
dirVect : array
Direction of the line.
chord_center : array
The center of the chord made by the arc.
chordNormVect : array
Normal vector of the chord.
invROC : float
Inverse of the ROC of the arc. Positive for concave surface.
diameter : float
Length of the chord.
verbose : boolean, optional
Prints useful information.
Returns
-------
dict
The returned value is a dictionary with the following keys:
"Intersection Point": numpy array of the coordinates of the intersection point.
"isHit": A boolean value of whether the line intersects with the plane or not.
"distance": Distance between the origin of the line and the intersection point.
"localNormVect": localNormVect,
"localNormAngle": localNormAngle.
'''
#Make sure the inputs are ndarrays
pos = np.array(pos, dtype=np.float64)
dirVect = np.array(dirVect, dtype=np.float64)
chord_center = np.array(chord_center, dtype=np.float64)
chordNormVect = np.array(chordNormVect, dtype=np.float64)
invROC = float(invROC)
diameter = float(diameter)
#Normalize
dirVect = dirVect/np.linalg.norm(dirVect)
chordNormVect = chordNormVect/np.linalg.norm(chordNormVect)
#Check if the ROC is too large.
if np.abs(invROC) < 1e-5:
#It is almost a plane
ans = line_plane_intersection(pos, dirVect, chord_center, chordNormVect, diameter)
localNormVect = chordNormVect
localNormAngle = np.mod(np.arctan2(localNormVect[1],
localNormVect[0]), 2*pi)
ans['localNormVect'] = localNormVect
ans['localNormAngle'] = localNormAngle
return ans
ROC = 1/invROC
#Compute the center of the arc
theta = np.arcsin(diameter/(2*ROC))
l = ROC*np.cos(theta)
arc_center = chord_center + chordNormVect*l
#For convex surface, pos has to be outside the circle.
if ROC < 0 and np.linalg.norm(pos - arc_center) < np.abs(ROC):
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
#First, decompose the vector connecting from the arc_center
#to pos into the components parallel to the line and orthogonal to it.
# s is the component in the orthogonal direction and t is the one along
#the line.
#A vector orthogonal to the line
k = np.array([-dirVect[1], dirVect[0]])
#Solve the equation to decompose the vector pos-arc_center
M = np.vstack((k, -dirVect)).T
ans = np.linalg.solve(M, pos - arc_center)
s = ans[0]
t = ans[1]
if np.abs(s) > np.abs(ROC):
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
#Compute two cross points
#Work with the chord formed by the line and the circle.
#d is half the length of the chord.
d = np.sqrt(ROC**2 - s**2)
if ROC > 0:
intersection_point = k*s+arc_center + d*dirVect
localNormVect = arc_center - intersection_point
else:
intersection_point = k*s+arc_center - d*dirVect
localNormVect = intersection_point - arc_center
#Check if dirVect and the vector connecting from pos to intersection_point
#are pointing the same direction.
if np.dot(dirVect, intersection_point - pos) < 0:
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
#Normalize
localNormVect = localNormVect/np.linalg.norm(localNormVect)
localNormAngle = np.mod(np.arctan2(localNormVect[1],
localNormVect[0]), 2*pi)
#Check if the intersection point is within the
#diameter
v0 = - np.sign(ROC) * chordNormVect*(1-1e-16) #(1-1e-16) is necessary to avoid rounding error
v1 = intersection_point - arc_center
v1 = v1/np.linalg.norm(v1)*(1-1e-16)
if np.arccos(np.dot(v0,v1)) > np.abs(theta):
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
distance = np.linalg.norm(intersection_point - pos)
return {'Intersection Point': intersection_point, 'isHit': True,
'distance': distance, 'localNormVect': localNormVect,
'localNormAngle': localNormAngle}
#}}}
#{{{ vector_rotation_2D
def vector_rotation_2D(vect, angle):
"""Rotate a 2D vector by an angle.
Parameters
----------
vect : array
A 2D vector.
angle : float
Angle of rotation in radians.
Returns
-------
array
The rotated vector.
"""
vect = np.array(vect)
angle = float(angle)
M = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle),np.cos(angle)]])
return np.dot(M, vect)
#}}}
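# A quick sketch, not part of the original file: rotating the unit x vector
# by pi/2 radians gives (up to floating point) the unit y vector.
def _example_vector_rotation_2D():
    return vector_rotation_2D([1.0, 0.0], pi / 2)  # ~array([0., 1.])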
def vector_normalize(vect):
'''
Normalize a vector
Parameters
----------
vect : array
The vector to be normalized
Returns
-------
array
The normalized vector.
'''
return vect/np.linalg.norm(vect)
#{{{ normSpheric
def normSpheric(normAngle, invROC, dist_from_center):
'''
Returns the local normal angle of a spheric mirror
at a distance from the center.
Parameters
----------
normAngle : float
The angle formed by the normal vector of the mirror
at the center and the x-axis.
invROC : float
1/R, where R is the ROC of the mirror.
dist_from_center: float
The distance from the center of the point where
the local normal is requested.
This is a signed value.
For a mirror facing +x (the normal vector points
towards positive x direction), this distance
is positive for points with positive y coordinate,
and negative for points with negative y coordinate.
Returns
-------
float
The local normal angle of a spheric mirror
at a distance from the center.
'''
normAngle = np.mod(normAngle, 2*pi)
return np.mod(np.arcsin(- dist_from_center * invROC) + normAngle, 2*pi)
#}}}
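# A brief sketch, not part of the original file: for a concave mirror facing
# +x (normAngle = 0) with a 2 m radius of curvature (invROC = 0.5), a point
# 10 cm above the centre has its local normal tilted slightly downwards,
# towards the centre of curvature (about -0.05 rad, i.e. ~6.23 rad mod 2*pi).
def _example_normSpheric():
    return normSpheric(normAngle=0.0, invROC=0.5, dist_from_center=0.1)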
#{{{ reflection and deflection angle
def refl_defl_angle(beamAngle, normAngle, n1, n2, invROC=None):
'''
Returns a tuples of reflection and deflection angles.
Parameters
----------
beamAngle : float
The angle formed by the propagation direction vector
of the incident beam and the x-axis.
normAngle : float
The angle formed by the normal vector of the surface
and the x-axis.
n1 : float
Index of refraction of the incident side medium.
n2 : float
Index of refraction of the transmission side medium.
invROC : float or None, optional
Inverse of the radius of curvature.
Returns
-------
6-tuple or 2-tuple
(reflAngle, deflAngle, Mrx, Mry, Mtx, Mty) or (reflAngle, deflAngle)
'''
beamAngle = np.mod(beamAngle, 2*pi)
normAngle = np.mod(normAngle, 2*pi)
incidentAngle = np.mod(beamAngle - normAngle, 2*pi) - pi
reflAngle = np.mod(normAngle - incidentAngle, 2*pi)
deflAngle = np.arcsin(n1*np.sin(incidentAngle)/n2)
deflAngle = np.mod(deflAngle + pi + normAngle, 2*pi)
    if invROC is not None:
#Calculate ABCD matrices
#Absolute value of the incident angle
theta1 = np.abs(incidentAngle)
#For reflection
Mrx = np.array([[1., 0.], [-2*n1*invROC/np.cos(theta1), 1.]])
Mry = np.array([[1., 0.], [-2*n1*invROC*np.cos(theta1), 1.]])
#For transmission
theta2 = np.arcsin(n1*np.sin(theta1)/n2)
nex = (n2*np.cos(theta2)-n1*np.cos(theta1))/(np.cos(theta1)*np.cos(theta2))
Mtx = np.array([[np.cos(theta2)/np.cos(theta1), 0.],
[nex*invROC, np.cos(theta1)/np.cos(theta2)]])
ney = n2*np.cos(theta2)-n1*np.cos(theta1)
Mty = np.array([[1., 0.],[ney*invROC, 1.]])
return (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty)
else:
return (reflAngle, deflAngle)
#}}}
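# A short sketch, not part of the original file: a beam travelling at 135
# degrees hits a flat air-to-glass interface (n1 = 1.0, n2 = 1.5) whose
# normal points along +x; the reflected beam leaves at 45 degrees and the
# transmitted beam is bent towards the normal, at roughly 151.9 degrees.
def _example_refl_defl_angle():
    refl, defl = refl_defl_angle(beamAngle=3*pi/4, normAngle=0.0,
                                 n1=1.0, n2=1.5)
    return np.rad2deg(refl), np.rad2deg(defl)  # (~45.0, ~151.9)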
#{{{ reflection and deflection angle for cylindrical surface
def cyl_refl_defl_angle(beamAngle, normAngle, n1, n2, invROC=None, curve_direction='h'):
'''
    Returns a tuple of reflection and deflection angles for incidence of a beam on a cylindrical surface.
Parameters
----------
beamAngle : float
The angle formed by the propagation direction vector
of the incident beam and the x-axis.
normAngle : float
The angle formed by the normal vector of the surface
and the x-axis.
n1 : float
Index of refraction of the incident side medium.
n2 : float
Index of refraction of the transmission side medium.
invROC : float or None, optional
Inverse of the radius of curvature.
curve_direction : str, optional
        Direction of curvature. Either 'h' (horizontal) or 'v' (vertical).
    Returns
    -------
    6-tuple or 2-tuple
        (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty) or (reflAngle, deflAngle)
    '''
beamAngle = np.mod(beamAngle, 2*pi)
normAngle = np.mod(normAngle, 2*pi)
incidentAngle = np.mod(beamAngle - normAngle, 2*pi) - pi
reflAngle = np.mod(normAngle - incidentAngle, 2*pi)
deflAngle = np.arcsin(n1*np.sin(incidentAngle)/n2)
deflAngle = np.mod(deflAngle + pi + normAngle, 2*pi)
    if invROC is not None:
#Calculate ABCD matrices
#Absolute value of the incident angle
theta1 = np.abs(incidentAngle)
#For reflection
if curve_direction == 'h':
Mrx = np.array([[1., 0.], [-2*n1*invROC/np.cos(theta1), 1.]])
Mry = np.array([[1., 0.], [0., 1.]])
else:
Mrx = np.array([[1., 0.], [0., 1.]])
Mry = np.array([[1., 0.], [-2*n1*invROC*np.cos(theta1), 1.]])
#For transmission
theta2 = np.arcsin(n1*np.sin(theta1)/n2)
nex = (n2*np.cos(theta2)-n1*np.cos(theta1))/(np.cos(theta1)*np.cos(theta2))
Mtx = np.array([[np.cos(theta2)/np.cos(theta1), 0.],
[nex*invROC, np.cos(theta1)/np.cos(theta2)]])
ney = n2*np.cos(theta2)-n1*np.cos(theta1)
Mty = np.array([[1., 0.],[ney*invROC, 1.]])
return (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty)
else:
return (reflAngle, deflAngle)
#}}}
#}}}
#{{{ VariCAD utility functions
def vc_deflect(theta, theta1, n1, n2):
'''
Deflection angle helper function for VariCAD.
Parameters
----------
theta : float
Angle of the surface measured from right.
theta1 : float
Angle of the incident beam measured from right.
n1 : float
Index of refraction of the incident side medium.
n2 : float
Index of refraction of the transmission side medium.
Returns
-------
phi2 : float
Angle of the deflected beam measured from right.
'''
    #Convert theta and theta1 to 0-360 format
if theta < 0:
theta = 360.0 + theta
if theta > 180:
theta = theta -180.0
if theta1 < 0:
theta1 = 360.0 + theta1
#Determine the incident angle
phi = abs(theta - theta1)
phi1 = 90.0-np.arcsin(np.abs(np.sin(pi*phi/180.0)))*180.0/pi
#Calculate deflection angle
phi2 = deflection_angle(phi1, n1, n2)
#Convert to the 0-360 angle
s1 = np.sign(np.sin(pi*(theta1 - theta)/180.0))
s2 = -np.sign(np.cos(pi*(theta1 - theta)/180.0))
phi2 = theta + s1*90 + s1*s2*phi2
return phi2
def vc_reflect(theta, theta1):
"""Convert theta and theta1 to 0-360 format.
Parameters
----------
theta : float
Angle of the surface measured from right.
theta1 : float
Angle of the incident beam measured from right.
Returns
-------
    float
        Angle of the reflected beam measured from right.
"""
    #Convert theta and theta1 to 0-360 format
if theta < 0:
theta = 360.0 + theta
if theta > 180:
theta = theta -180.0
if theta1 < 0:
theta1 = 360.0 + theta1
return theta - (theta1 - theta)
#}}}
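#{{{ __main__ smoke test (added)
# Minimal smoke-test sketch added for illustration only; it assumes numpy is
# available as np at the top of this module, as the functions above already do.
if __name__ == '__main__':
    # Rotating the x unit vector by 90 degrees gives (approximately) [0, 1].
    print(vector_rotation_2D([1.0, 0.0], np.pi/2))
    # Normal incidence: a beam travelling towards -x (angle pi) onto a surface
    # whose normal points towards +x (angle 0) is reflected back towards +x
    # (angle 0) and transmitted straight on towards -x (angle pi).
    print(refl_defl_angle(np.pi, 0.0, 1.0, 1.5))
    # VariCAD helper: a beam at 30 deg hitting a surface at 90 deg is
    # reflected to 150 deg.
    print(vc_reflect(90.0, 30.0))
#}}}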
|
py | 1a3e6600da8d69301ffc4e837ab9b6fc382a8314 | import shutit
# This creates a box that speeds up builds for https://github.com/ianmiell/shutit-openshift-cluster
#s = shutit.create_session('bash',loglevel='debug',echo=True)
s = shutit.create_session('bash',loglevel='info',echo=True)
s.send('rm -rf tmpvagrantboxcreate && mkdir tmpvagrantboxcreate && cd tmpvagrantboxcreate')
s.send('vagrant init centos/7')
s.send('vagrant box update')
s.send('vagrant up')
# Log onto machine and prepare it.
s.login('vagrant ssh')
s.login('sudo su -')
s.send('yum install -y wget')
# Not necessary, but handy
s.install('yum-utils')
s.install('sysstat')
# Installed by cookbook anyway
s.install('epel-release')
s.install('docker')
s.install('iptables-services')
s.install('vim-enhanced')
s.install('git')
s.install('dnsmasq')
s.install('python')
s.install('libselinux-python')
s.install('net-tools')
s.install('bind-utils')
s.install('bash-completion')
s.install('deltarpm')
s.install('libselinux-python')
# origin deps TODO
#s.multisend('yum install -y ',{'s this ok':'y'})
s.send(r'''sed -i 's/.*\/10\(.*\)/*\1/' /etc/cron.d/sysstat && systemctl enable sysstat''')
s.send(r'''sed -i 's/^\(127.0.0.1[ \t]*[^ \t]*\).*/\1/' /etc/hosts''',note='Make sure chef sees a fqdn.')
s.send('wget -qO- https://raw.githubusercontent.com/ianmiell/vagrant-swapfile/master/vagrant-swapfile.sh | sh')
s.send('echo root:origin | /usr/sbin/chpasswd')
# Downloads
# Client
s.send('wget -nc -q https://packages.chef.io/files/stable/chef/13.5.3/el/7/chef-13.5.3-1.el7.x86_64.rpm')
#s.send('wget -nc -q https://packages.chef.io/files/stable/chef/12.21.4/el/7/chef-12.21.4-1.el7.x86_64.rpm')
# Chefdk
s.send('wget -nc -q https://packages.chef.io/files/stable/chefdk/2.5.3/el/7/chefdk-2.5.3-1.el7.x86_64.rpm')
# Chef server
# eg https://downloads.chef.io/chef-server/12.17.33
# Go to chef website and download the rpm. Then split and store on github, and pull from the raw links here.
#eg:
# split -b 49m chef-server-core-12.17.3-1.el7.x86_64.rpm chef-server-core-12.17.3-1.el7.x86_64.rpm.x
s.send('wget -nc -q https://github.com/ianmiell/shutit-chef-env/raw/master/chef-server-core-12.17.3-1.el7.x86_64.rpm.xaa')
s.send('wget -nc -q https://github.com/ianmiell/shutit-chef-env/raw/master/chef-server-core-12.17.3-1.el7.x86_64.rpm.xab')
s.send('wget -nc -q https://github.com/ianmiell/shutit-chef-env/raw/master/chef-server-core-12.17.3-1.el7.x86_64.rpm.xac')
s.send('cat chef-server-core-12.17.3-1.el7.x86_64.rpm.xaa chef-server-core-12.17.3-1.el7.x86_64.rpm.xab chef-server-core-12.17.3-1.el7.x86_64.rpm.xac > chef-server-core-12.17.3-1.el7.x86_64.rpm')
s.send('rm -f *xaa *xab *xac')
# Guest additions
s.multisend('yum install -y dkms kernel-devel kernel-devel-3.10.0-862.2.3.el7.x86_64',{'s this ok':'y'})
s.multisend('yum groupinstall "Development Tools"',{'s this ok':'y'})
s.send('wget -q http://download.virtualbox.org/virtualbox/5.2.2/VBoxGuestAdditions_5.2.2.iso')
s.send('mount -t iso9660 -o loop ./VBoxGuestAdditions_*.iso /mnt')
s.send('cd /mnt')
s.send('./VBoxLinuxAdditions.run')
s.send('cd -')
# Workaround for docker networking issues + landrush.
s.insert_text('Environment=GODEBUG=netdns=cgo','/lib/systemd/system/docker.service',pattern='.Service.')
s.send('mkdir -p /etc/docker',note='Create the docker config folder')
# The containers running in the pods take their dns setting from the docker daemon. Add the default kubernetes service ip to the list so that items can be updated.
# Ref: IWT-3895
#s.send_file('/etc/docker/daemon.json',"""{
# "dns": ["8.8.8.8"]
#}""",note='Use the google dns server rather than the vagrant one. Change to the value you want if this does not work, eg if google dns is blocked.')
s.send(r'''sed -i 's/^\(127.0.0.1[ \t]*[^ \t]*\).*/\1/' /etc/hosts''')
s.send('''sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config''')
s.send('''sed -i 's/.*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config''')
s.send('service sshd restart')
s.send('echo root:chef | /usr/sbin/chpasswd')
s.multisend('ssh-keygen',{'Enter file':'','Enter passphrase':'','Enter same pass':''})
# install expects httpd to be not installed
#s.install('httpd')
s.remove('httpd')
s.send('touch /root/buildtime')
s.logout()
s.logout()
# Remove package.box because it's hella confusing otherwise.
s.send('vagrant box remove package.box || true')
s.send('vagrant package')
s.send('split -b 49m package.box') # 50m is the github warning limit
s.send('cd /space/git/shutit-openshift-cluster')
s.send('rm -rf cachedbox/* || git rm -f cachedbox/*')
s.send('mkdir -p /space/git/shutit-openshift-cluster/cachedbox')
s.send('cd -')
s.send('mv x* /space/git/shutit-openshift-cluster/cachedbox')
s.send('cd /space/git/shutit-openshift-cluster')
s.send('git add cachedbox')
s.send('git commit -am cachedbox')
s.send('git push')
s.send('cd -')
s.pause_point('Box created.')
|
py | 1a3e661bc07f9f705eafa1b355d0410e29333863 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 14:52:14 2019
@author: Artemis
"""
"""
import numpy as np
import matplotlib.pyplot as plt
line = np.linspace(-5, 5, 200)
plt.plot(line, np.tanh(line), label='tanh')
plt.plot(line, np.maximum(line, 0), label='relu')
plt.legend(loc='best')
plt.xlabel('x')
plt.ylabel('relu(x) and tanh(x)')
plt.show()
"""
"""
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
wine = load_wine()
X = wine.data[:,:2]
y = wine.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=[10,10],
activation='tanh', alpha=1)
mlp.fit(X_train, y_train)
cmap_light = ListedColormap(['#FFAAAA','#AAFFAA','#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000','#00FF00','#0000FF'])
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),
np.arange(y_min, y_max, .02))
Z = mlp.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k', s=60)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("MLPClassifier: solver=lbfgs")
plt.show()
"""
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from PIL import Image
import numpy as np
mnist = fetch_mldata('mnist-original')
X = mnist.data/255.
y = mnist.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=5000, test_size=1000, random_state=62)
mlp_hw = MLPClassifier(solver='lbfgs', hidden_layer_sizes=[100,100],
activation='relu', alpha=1e-5, random_state=62)
mlp_hw.fit(X_train, y_train)
print("Score:{:.2f}%".format(mlp_hw.score(X_test, y_test)*100))
image = Image.open('data/4.jpg').convert('F')
image = image.resize((28,28))
arr = []
for i in range(28):
for j in range(28):
pixel = 1.0 - float(image.getpixel((j,i))) / 255.
arr.append(pixel)
arr_1 = np.array(arr).reshape(1, -1)
print('The number in the image: {:.0f}'.format(mlp_hw.predict(arr_1)[0]))
pass |
py | 1a3e6658d0f51cc1fd104e4296784f8da8720c1d | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/resources/paid_organic_search_term_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/resources/paid_organic_search_term_view.proto',
package='google.ads.googleads.v3.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v3.resourcesB\036PaidOrganicSearchTermViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V3.Resources\312\002!Google\\Ads\\GoogleAds\\V3\\Resources\352\002%Google::Ads::GoogleAds::V3::Resources'),
serialized_pb=_b('\nKgoogle/ads/googleads_v3/proto/resources/paid_organic_search_term_view.proto\x12!google.ads.googleads.v3.resources\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\xf1\x01\n\x19PaidOrganicSearchTermView\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x31\n\x0bsearch_term\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue:\x89\x01\xea\x41\x85\x01\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12Ocustomers/{customer}/paidOrganicSearchTermViews/{paid_organic_search_term_view}B\x8b\x02\n%com.google.ads.googleads.v3.resourcesB\x1ePaidOrganicSearchTermViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V3.Resources\xca\x02!Google\\Ads\\GoogleAds\\V3\\Resources\xea\x02%Google::Ads::GoogleAds::V3::Resourcesb\x06proto3')
,
dependencies=[google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PAIDORGANICSEARCHTERMVIEW = _descriptor.Descriptor(
name='PaidOrganicSearchTermView',
full_name='google.ads.googleads.v3.resources.PaidOrganicSearchTermView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.resources.PaidOrganicSearchTermView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='search_term', full_name='google.ads.googleads.v3.resources.PaidOrganicSearchTermView.search_term', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('\352A\205\001\n2googleads.googleapis.com/PaidOrganicSearchTermView\022Ocustomers/{customer}/paidOrganicSearchTermViews/{paid_organic_search_term_view}'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=204,
serialized_end=445,
)
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['PaidOrganicSearchTermView'] = _PAIDORGANICSEARCHTERMVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PaidOrganicSearchTermView = _reflection.GeneratedProtocolMessageType('PaidOrganicSearchTermView', (_message.Message,), dict(
DESCRIPTOR = _PAIDORGANICSEARCHTERMVIEW,
__module__ = 'google.ads.googleads_v3.proto.resources.paid_organic_search_term_view_pb2'
,
__doc__ = """A paid organic search term view providing a view of search stats across
ads and organic listings aggregated by search term at the ad group
level.
Attributes:
resource_name:
The resource name of the search term view. Search term view
resource names have the form: ``customers/{customer_id}/paidO
rganicSearchTermViews/{campaign_id}~ {ad_group_id}~{URL-base64
search term}``
search_term:
The search term.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.resources.PaidOrganicSearchTermView)
))
_sym_db.RegisterMessage(PaidOrganicSearchTermView)
DESCRIPTOR._options = None
_PAIDORGANICSEARCHTERMVIEW._options = None
# @@protoc_insertion_point(module_scope)
|
py | 1a3e674a8869041aff8080e82896494af780d02e | from vdb.lib.npm import NpmSource
from depscan.lib import config as config
from depscan.lib.pkg_query import npm_metadata, pypi_metadata
# Dict mapping project type to the audit source
type_audit_map = {"nodejs": NpmSource(), "js": NpmSource()}
# Dict mapping project type to risk audit
risk_audit_map = {
"nodejs": npm_metadata,
"js": npm_metadata,
"python": pypi_metadata,
"py": pypi_metadata,
}
def audit(project_type, pkg_list, report_file):
"""
Method to audit packages using remote source such as npm advisory
:param project_type: Project type
:param pkg_list: List of packages
:param report_file: Report file
"""
results = type_audit_map[project_type].bulk_search(
app_info=config.npm_app_info, pkg_list=pkg_list
)
return results
def risk_audit(project_type, scoped_pkgs, private_ns, pkg_list, report_file):
"""
    Method to perform risk audit for packages using the package manager APIs
    :param project_type: Project type
    :param scoped_pkgs: Scoped packages
:param private_ns: Private namespace
:param pkg_list: List of packages
:param report_file: Report file
"""
audit_fn = risk_audit_map[project_type]
results = audit_fn(scoped_pkgs, pkg_list, private_ns)
return results
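# Illustrative usage sketch (added; the package below is hypothetical and the
# exact pkg_list schema is produced by depscan's own parsers):
#   pkg_list = [{"vendor": "lodash", "name": "lodash", "version": "4.17.20"}]
#   vuln_results = audit("nodejs", pkg_list, report_file=None)
#   risk_results = risk_audit("nodejs", scoped_pkgs={}, private_ns=None,
#                             pkg_list=pkg_list, report_file=None)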
|
py | 1a3e67507af0d08392fb4bddf78831e491df2e00 | import re
from imbi.endpoints.admin import base
class CRUDRequestHandler(base.CRUDRequestHandler):
NAME = 'admin-namespaces'
ID_KEY = 'name'
FIELDS = ['name', 'slug', 'icon_class', 'maintained_by']
DEFAULTS = {'icon_class': 'fas fa-users', 'maintained_by': []}
DELETE_SQL = 'DELETE FROM v1.namespaces WHERE "name"=%(name)s;'
GET_SQL = re.sub(r'\s+', ' ', """\
SELECT "name", created_at, created_by, last_modified_at, last_modified_by,
slug, icon_class, "maintained_by"
FROM v1.namespaces WHERE "name"=%(name)s;""")
PATCH_SQL = re.sub(r'\s+', ' ', """\
UPDATE v1.namespaces
SET "name" = %(name)s,
last_modified_at = CURRENT_TIMESTAMP,
last_modified_by = %(username)s,
slug = %(slug)s,
icon_class = %(icon_class)s,
"maintained_by" = %(maintained_by)s
WHERE "name"=%(current_name)s;""")
POST_SQL = re.sub(r'\s+', ' ', """\
INSERT INTO v1.namespaces
("name", created_by, slug, icon_class, "maintained_by")
VALUES (%(name)s, %(username)s, %(slug)s, %(icon_class)s,
%(maintained_by)s)
RETURNING "name";""")
|
py | 1a3e6820b88c2818c4c39aa99d732326c216b4b1 | import logging
import datetime
import xml.etree.cElementTree as ET
import core
from core.helpers import Url
logging = logging.getLogger(__name__)
'''
Does not supply rss feed -- backlog searches only.
'''
def search(imdbid, term):
proxy_enabled = core.CONFIG['Server']['Proxy']['enabled']
logging.info('Searching Zooqle for {}.'.format(term))
url = 'https://zooqle.com/search?q={}&fmt=rss'.format(term)
try:
if proxy_enabled and core.proxy.whitelist('https://www.zooqle.com') is True:
response = Url.open(url, proxy_bypass=True).text
else:
response = Url.open(url).text
if response:
return _parse(response, imdbid)
else:
return []
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Zooqle search failed.', exc_info=True)
return []
def get_rss():
return []
def _parse(xml, imdbid):
logging.info('Parsing Zooqle results.')
tree = ET.fromstring(xml)
items = tree[0].findall('item')
results = []
for i in items:
result = {}
try:
result['score'] = 0
size, suffix = i.find('description').text.strip().split(', ')[-1].split(' ')
m = (1024 ** 2) if suffix == 'MB' else (1024 ** 3)
result['size'] = int(float(size.replace(',', '')) * m)
result['status'] = 'Available'
pd = i.find('pubDate').text
result['pubdate'] = datetime.datetime.strftime(datetime.datetime.strptime(pd, '%a, %d %b %Y %H:%M:%S %z'), '%d %b %Y')
result['title'] = i.find('title').text
result['imdbid'] = imdbid
result['indexer'] = 'Zooqle'
result['info_link'] = i.find('guid').text
result['torrentfile'] = i[8].text
result['guid'] = i[7].text.lower()
result['type'] = 'magnet'
result['downloadid'] = None
result['freeleech'] = 0
result['download_client'] = None
result['seeders'] = int(i[9].text)
results.append(result)
except Exception as e:
logging.error('Error parsing Zooqle XML.', exc_info=True)
continue
logging.info('Found {} results from Zooqle.'.format(len(results)))
return results
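# Illustrative usage sketch (added; the imdbid and search term are made up):
#   results = search('tt0133093', 'The Matrix 1999')
#   for r in results:
#       print(r['title'], r['seeders'], r['size'])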
|
py | 1a3e69dbc07534d8f40e9e5f86cc6c8878341e3f | import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec, LeafSpec
from torch.utils._pytree import _broadcast_to_and_flatten
class TestPytree(TestCase):
def test_treespec_equality(self):
self.assertTrue(LeafSpec() == LeafSpec())
self.assertTrue(TreeSpec(list, None, []) == TreeSpec(list, None, []))
self.assertTrue(TreeSpec(list, None, [LeafSpec()]) == TreeSpec(list, None, [LeafSpec()]))
self.assertFalse(TreeSpec(tuple, None, []) == TreeSpec(list, None, []))
self.assertTrue(TreeSpec(tuple, None, []) != TreeSpec(list, None, []))
def test_flatten_unflatten_leaf(self):
def run_test_with_leaf(leaf):
values, treespec = tree_flatten(leaf)
self.assertEqual(values, [leaf])
self.assertEqual(treespec, LeafSpec())
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, leaf)
run_test_with_leaf(1)
run_test_with_leaf(1.)
run_test_with_leaf(None)
run_test_with_leaf(bool)
run_test_with_leaf(torch.randn(3, 3))
def test_flatten_unflatten_list(self):
def run_test(lst):
expected_spec = TreeSpec(list, None, [LeafSpec() for _ in lst])
values, treespec = tree_flatten(lst)
self.assertTrue(isinstance(values, list))
self.assertEqual(values, lst)
self.assertEqual(treespec, expected_spec)
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, lst)
self.assertTrue(isinstance(unflattened, list))
run_test([])
run_test([1., 2])
run_test([torch.tensor([1., 2]), 2, 10, 9, 11])
def test_flatten_unflatten_tuple(self):
def run_test(tup):
expected_spec = TreeSpec(tuple, None, [LeafSpec() for _ in tup])
values, treespec = tree_flatten(tup)
self.assertTrue(isinstance(values, list))
self.assertEqual(values, list(tup))
self.assertEqual(treespec, expected_spec)
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, tup)
self.assertTrue(isinstance(unflattened, tuple))
run_test(())
run_test((1.,))
run_test((1., 2))
run_test((torch.tensor([1., 2]), 2, 10, 9, 11))
def test_flatten_unflatten_dict(self):
def run_test(tup):
expected_spec = TreeSpec(dict, list(tup.keys()),
[LeafSpec() for _ in tup.values()])
values, treespec = tree_flatten(tup)
self.assertTrue(isinstance(values, list))
self.assertEqual(values, list(tup.values()))
self.assertEqual(treespec, expected_spec)
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, tup)
self.assertTrue(isinstance(unflattened, dict))
run_test({})
run_test({'a': 1})
run_test({'abcdefg': torch.randn(2, 3)})
run_test({1: torch.randn(2, 3)})
run_test({'a': 1, 'b': 2, 'c': torch.randn(2, 3)})
def test_flatten_unflatten_nested(self):
def run_test(pytree):
values, treespec = tree_flatten(pytree)
self.assertTrue(isinstance(values, list))
self.assertEqual(len(values), treespec.num_leaves)
# NB: python basic data structures (dict list tuple) all have
# contents equality defined on them, so the following works for them.
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, pytree)
cases = [
[()],
([],),
{'a': ()},
{'a': 0, 'b': [{'c': 1}]},
{'a': 0, 'b': [1, {'c': 2}, torch.randn(3)], 'c': (torch.randn(2, 3), 1)},
]
for case in cases:
run_test(case)
def test_treespec_repr(self):
# Check that it looks sane
pytree = (0, [0, 0, 0])
_, spec = tree_flatten(pytree)
self.assertEqual(
repr(spec), 'TreeSpec(tuple, None, [*, TreeSpec(list, None, [*, *, *])])')
def test_broadcast_to_and_flatten(self):
cases = [
(1, (), []),
# Same (flat) structures
((1,), (0,), [1]),
([1], [0], [1]),
((1, 2, 3), (0, 0, 0), [1, 2, 3]),
({'a': 1, 'b': 2}, {'a': 0, 'b': 0}, [1, 2]),
# Mismatched (flat) structures
([1], (0,), None),
([1], (0,), None),
((1,), [0], None),
((1, 2, 3), (0, 0), None),
({'a': 1, 'b': 2}, {'a': 0}, None),
({'a': 1, 'b': 2}, {'a': 0, 'c': 0}, None),
({'a': 1, 'b': 2}, {'a': 0, 'b': 0, 'c': 0}, None),
# Same (nested) structures
((1, [2, 3]), (0, [0, 0]), [1, 2, 3]),
((1, [(2, 3), 4]), (0, [(0, 0), 0]), [1, 2, 3, 4]),
# Mismatched (nested) structures
((1, [2, 3]), (0, (0, 0)), None),
((1, [2, 3]), (0, [0, 0, 0]), None),
# Broadcasting single value
(1, (0, 0, 0), [1, 1, 1]),
(1, [0, 0, 0], [1, 1, 1]),
(1, {'a': 0, 'b': 0}, [1, 1]),
(1, (0, [0, [0]], 0), [1, 1, 1, 1]),
(1, (0, [0, [0, [], [[[0]]]]], 0), [1, 1, 1, 1, 1]),
# Broadcast multiple things
((1, 2), ([0, 0, 0], [0, 0]), [1, 1, 1, 2, 2]),
((1, 2), ([0, [0, 0], 0], [0, 0]), [1, 1, 1, 1, 2, 2]),
(([1, 2, 3], 4), ([0, [0, 0], 0], [0, 0]), [1, 2, 2, 3, 4, 4]),
]
for pytree, to_pytree, expected in cases:
_, to_spec = tree_flatten(to_pytree)
result = _broadcast_to_and_flatten(pytree, to_spec)
self.assertEqual(result, expected, msg=str([pytree, to_spec, expected]))
if __name__ == '__main__':
run_tests()
|
py | 1a3e6d87441e45740d726e56b23c4f61eb7cf796 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.target import Target
from ..utility.notification import Notification
import logging
from struct import unpack
from time import time
from binascii import crc32
# Number of bytes in a page to read to quickly determine if the page has the same data
PAGE_ESTIMATE_SIZE = 32
PAGE_READ_WEIGHT = 0.3
DATA_TRANSFER_B_PER_S = 40 * 1000 # ~40KB/s, depends on clock speed, theoretical limit for HID is 56,000 B/s
## @brief Exception raised when flashing fails outright.
class FlashFailure(RuntimeError):
pass
class ProgrammingInfo(object):
def __init__(self):
self.program_type = None # Type of programming performed - FLASH_PAGE_ERASE or FLASH_CHIP_ERASE
self.program_time = None # Total programming time
self.analyze_type = None # Type of flash analysis performed - FLASH_ANALYSIS_CRC32 or FLASH_ANALYSIS_PARTIAL_PAGE_READ
self.analyze_time = None # Time to analyze flash contents
def _same(d1, d2):
assert len(d1) == len(d2)
for i in range(len(d1)):
if d1[i] != d2[i]:
return False
return True
def _erased(d):
for i in range(len(d)):
if d[i] != 0xFF:
return False
return True
def _stub_progress(percent):
pass
class FlashPage(object):
def __init__(self, addr, size, data, erase_weight, program_weight):
self.addr = addr
self.size = size
self.data = data
self.erase_weight = erase_weight
self.program_weight = program_weight
self.erased = None
self.same = None
def get_program_weight(self):
"""
Get time to program a page including the data transfer
"""
return self.program_weight + \
float(len(self.data)) / float(DATA_TRANSFER_B_PER_S)
def get_erase_program_weight(self):
"""
Get time to erase and program a page including data transfer time
"""
return self.erase_weight + self.program_weight + \
float(len(self.data)) / float(DATA_TRANSFER_B_PER_S)
def get_verify_weight(self):
"""
Get time to verify a page
"""
return float(self.size) / float(DATA_TRANSFER_B_PER_S)
class FlashOperation(object):
def __init__(self, addr, data):
self.addr = addr
self.data = data
class FlashBuilder(object):
# Type of flash operation
FLASH_PAGE_ERASE = 1
FLASH_CHIP_ERASE = 2
# Type of flash analysis
FLASH_ANALYSIS_CRC32 = "CRC32"
FLASH_ANALYSIS_PARTIAL_PAGE_READ = "PAGE_READ"
def __init__(self, flash, base_addr=0):
self.flash = flash
self.flash_start = base_addr
self.flash_operation_list = []
self.page_list = []
self.perf = ProgrammingInfo()
self.enable_double_buffering = True
self.max_errors = 10
def enable_double_buffer(self, enable):
self.enable_double_buffering = enable
def set_max_errors(self, count):
self.max_errors = count
def add_data(self, addr, data):
"""
Add a block of data to be programmed
Note - programming does not start until the method
program is called.
"""
# Sanity check
if addr < self.flash_start:
raise Exception("Invalid flash address 0x%x is before flash start 0x%x" % (addr, self.flash_start))
# Add operation to list
self.flash_operation_list.append(FlashOperation(addr, data))
# Keep list sorted
self.flash_operation_list = sorted(self.flash_operation_list, key=lambda operation: operation.addr)
# Verify this does not overlap
prev_flash_operation = None
for operation in self.flash_operation_list:
if prev_flash_operation != None:
if prev_flash_operation.addr + len(prev_flash_operation.data) > operation.addr:
raise ValueError("Error adding data - Data at 0x%x..0x%x overlaps with 0x%x..0x%x"
% (prev_flash_operation.addr, prev_flash_operation.addr + len(prev_flash_operation.data),
operation.addr, operation.addr + len(operation.data)))
prev_flash_operation = operation
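    # Illustrative usage sketch (added; not part of pyOCD). It assumes `flash`
    # is an initialized flash object exposing the interface used by this class:
    #   builder = FlashBuilder(flash, base_addr=0x0)
    #   builder.add_data(0x0, image_data)   # image_data: sequence of byte values
    #   perf = builder.program(smart_flash=True)
    #   print(perf.program_time)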
def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_verify=False):
"""
Determine fastest method of flashing and then run flash programming.
Data must have already been added with add_data
"""
# Send notification that we're about to program flash.
self.flash.target.notify(Notification(event=Target.EVENT_PRE_FLASH_PROGRAM, source=self))
# Assumptions
# 1. Page erases must be on page boundaries ( page_erase_addr % page_size == 0 )
# 2. Page erase can have a different size depending on location
# 3. It is safe to program a page with less than a page of data
# Examples
        # - LPC4330 - non-zero flash base address
        # - nRF51   - UICR located far from flash (address 0x10001000)
        # - LPC1768 - different sized pages
program_start = time()
if progress_cb is None:
progress_cb = _stub_progress
# There must be at least 1 flash operation
if len(self.flash_operation_list) == 0:
logging.warning("No pages were programmed")
return
# Convert the list of flash operations into flash pages
program_byte_count = 0
flash_addr = self.flash_operation_list[0].addr
info = self.flash.get_page_info(flash_addr)
if info is None:
raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
page_addr = flash_addr - (flash_addr % info.size)
current_page = FlashPage(page_addr, info.size, [], info.erase_weight, info.program_weight)
self.page_list.append(current_page)
for flash_operation in self.flash_operation_list:
pos = 0
while pos < len(flash_operation.data):
# Check if operation is in next page
flash_addr = flash_operation.addr + pos
if flash_addr >= current_page.addr + current_page.size:
info = self.flash.get_page_info(flash_addr)
if info is None:
raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
page_addr = flash_addr - (flash_addr % info.size)
current_page = FlashPage(page_addr, info.size, [], info.erase_weight, info.program_weight)
self.page_list.append(current_page)
# Fill the page gap if there is one
page_data_end = current_page.addr + len(current_page.data)
if flash_addr != page_data_end:
old_data = self.flash.target.read_memory_block8(page_data_end, flash_addr - page_data_end)
current_page.data.extend(old_data)
# Copy data to page and increment pos
space_left_in_page = info.size - len(current_page.data)
space_left_in_data = len(flash_operation.data) - pos
amount = min(space_left_in_page, space_left_in_data)
current_page.data.extend(flash_operation.data[pos:pos + amount])
program_byte_count += amount
#increment position
pos += amount
# If smart flash was set to false then mark all pages
# as requiring programming
if not smart_flash:
self._mark_all_pages_for_programming()
# If the first page being programmed is not the first page
# in ROM then don't use a chip erase
if self.page_list[0].addr > self.flash_start:
if chip_erase is None:
chip_erase = False
elif chip_erase is True:
logging.warning('Chip erase used when flash address 0x%x is not the same as flash start 0x%x', self.page_list[0].addr, self.flash_start)
self.flash.init()
chip_erase_count, chip_erase_program_time = self._compute_chip_erase_pages_and_weight()
page_erase_min_program_time = self._compute_page_erase_pages_weight_min()
# If chip_erase hasn't been specified determine if chip erase is faster
# than page erase regardless of contents
if (chip_erase is None) and (chip_erase_program_time < page_erase_min_program_time):
chip_erase = True
# If chip erase isn't True then analyze the flash
if chip_erase != True:
analyze_start = time()
if self.flash.get_flash_info().crc_supported:
sector_erase_count, page_program_time = self._compute_page_erase_pages_and_weight_crc32(fast_verify)
self.perf.analyze_type = FlashBuilder.FLASH_ANALYSIS_CRC32
else:
sector_erase_count, page_program_time = self._compute_page_erase_pages_and_weight_sector_read()
self.perf.analyze_type = FlashBuilder.FLASH_ANALYSIS_PARTIAL_PAGE_READ
analyze_finish = time()
self.perf.analyze_time = analyze_finish - analyze_start
logging.debug("Analyze time: %f" % (analyze_finish - analyze_start))
# If chip erase hasn't been set then determine fastest method to program
if chip_erase is None:
logging.debug("Chip erase count %i, Page erase est count %i" % (chip_erase_count, sector_erase_count))
logging.debug("Chip erase weight %f, Page erase weight %f" % (chip_erase_program_time, page_program_time))
chip_erase = chip_erase_program_time < page_program_time
if chip_erase:
if self.flash.is_double_buffering_supported and self.enable_double_buffering:
logging.debug("Using double buffer chip erase program")
flash_operation = self._chip_erase_program_double_buffer(progress_cb)
else:
flash_operation = self._chip_erase_program(progress_cb)
else:
if self.flash.is_double_buffering_supported and self.enable_double_buffering:
logging.debug("Using double buffer page erase program")
flash_operation = self._page_erase_program_double_buffer(progress_cb)
else:
flash_operation = self._page_erase_program(progress_cb)
self.flash.target.reset_stop_on_reset()
program_finish = time()
self.perf.program_time = program_finish - program_start
self.perf.program_type = flash_operation
logging.info("Programmed %d bytes (%d pages) at %.02f kB/s", program_byte_count, len(self.page_list), ((program_byte_count/1024) / self.perf.program_time))
# Send notification that we're done programming flash.
self.flash.target.notify(Notification(event=Target.EVENT_POST_FLASH_PROGRAM, source=self))
return self.perf
def get_performance(self):
return self.perf
def _mark_all_pages_for_programming(self):
for page in self.page_list:
page.erased = False
page.same = False
def _compute_chip_erase_pages_and_weight(self):
"""
Compute the number of erased pages.
Determine how many pages in the new data are already erased.
"""
chip_erase_count = 0
chip_erase_weight = 0
chip_erase_weight += self.flash.get_flash_info().erase_weight
for page in self.page_list:
if page.erased is None:
page.erased = _erased(page.data)
if not page.erased:
chip_erase_count += 1
chip_erase_weight += page.get_program_weight()
self.chip_erase_count = chip_erase_count
self.chip_erase_weight = chip_erase_weight
return chip_erase_count, chip_erase_weight
def _compute_page_erase_pages_weight_min(self):
page_erase_min_weight = 0
for page in self.page_list:
page_erase_min_weight += page.get_verify_weight()
return page_erase_min_weight
def _compute_page_erase_pages_and_weight_sector_read(self):
"""
Estimate how many pages are the same.
Quickly estimate how many pages are the same. These estimates are used
        by page_erase_program, so it is recommended to call this before beginning programming.
This is done automatically by smart_program.
"""
# Quickly estimate how many pages are the same
page_erase_count = 0
page_erase_weight = 0
for page in self.page_list:
# Analyze pages that haven't been analyzed yet
if page.same is None:
size = min(PAGE_ESTIMATE_SIZE, len(page.data))
data = self.flash.target.read_memory_block8(page.addr, size)
page_same = _same(data, page.data[0:size])
if page_same is False:
page.same = False
# Put together page and time estimate
for page in self.page_list:
if page.same is False:
page_erase_count += 1
page_erase_weight += page.get_erase_program_weight()
elif page.same is None:
# Page is probably the same but must be read to confirm
page_erase_weight += page.get_verify_weight()
elif page.same is True:
# Page is confirmed to be the same so no programming weight
pass
self.page_erase_count = page_erase_count
self.page_erase_weight = page_erase_weight
return page_erase_count, page_erase_weight
def _compute_page_erase_pages_and_weight_crc32(self, assume_estimate_correct=False):
"""
Estimate how many pages are the same.
Quickly estimate how many pages are the same. These estimates are used
        by page_erase_program, so it is recommended to call this before beginning programming.
This is done automatically by smart_program.
If assume_estimate_correct is set to True, then pages with matching CRCs
will be marked as the same. There is a small chance that the CRCs match even though the
        data is different, but the odds of this happening are low: ~1/(2^32) = ~2.33*10^-8%.
"""
# Build list of all the pages that need to be analyzed
sector_list = []
page_list = []
for page in self.page_list:
if page.same is None:
# Add sector to compute_crcs
sector_list.append((page.addr, page.size))
page_list.append(page)
# Compute CRC of data (Padded with 0xFF)
data = list(page.data)
pad_size = page.size - len(page.data)
if pad_size > 0:
data.extend([0xFF] * pad_size)
page.crc = crc32(bytearray(data)) & 0xFFFFFFFF
# Analyze pages
page_erase_count = 0
page_erase_weight = 0
if len(page_list) > 0:
crc_list = self.flash.compute_crcs(sector_list)
for page, crc in zip(page_list, crc_list):
page_same = page.crc == crc
if assume_estimate_correct:
page.same = page_same
elif page_same is False:
page.same = False
# Put together page and time estimate
for page in self.page_list:
if page.same is False:
page_erase_count += 1
page_erase_weight += page.get_erase_program_weight()
elif page.same is None:
# Page is probably the same but must be read to confirm
page_erase_weight += page.get_verify_weight()
elif page.same is True:
# Page is confirmed to be the same so no programming weight
pass
self.page_erase_count = page_erase_count
self.page_erase_weight = page_erase_weight
return page_erase_count, page_erase_weight
def _chip_erase_program(self, progress_cb=_stub_progress):
"""
Program by first performing a chip erase.
"""
logging.debug("Smart chip erase")
logging.debug("%i of %i pages already erased", len(self.page_list) - self.chip_erase_count, len(self.page_list))
progress_cb(0.0)
progress = 0
self.flash.erase_all()
progress += self.flash.get_flash_info().erase_weight
for page in self.page_list:
if not page.erased:
self.flash.program_page(page.addr, page.data)
progress += page.get_program_weight()
progress_cb(float(progress) / float(self.chip_erase_weight))
progress_cb(1.0)
return FlashBuilder.FLASH_CHIP_ERASE
def _next_unerased_page(self, i):
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
while page.erased:
i += 1
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
return page, i + 1
def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress):
"""
Program by first performing a chip erase.
"""
logging.debug("Smart chip erase")
logging.debug("%i of %i pages already erased", len(self.page_list) - self.chip_erase_count, len(self.page_list))
progress_cb(0.0)
progress = 0
self.flash.erase_all()
progress += self.flash.get_flash_info().erase_weight
# Set up page and buffer info.
error_count = 0
current_buf = 0
next_buf = 1
page, i = self._next_unerased_page(0)
assert page is not None
# Load first page buffer
self.flash.load_page_buffer(current_buf, page.addr, page.data)
while page is not None:
# Kick off this page program.
current_addr = page.addr
current_weight = page.get_program_weight()
self.flash.start_program_page_with_buffer(current_buf, current_addr)
# Get next page and load it.
page, i = self._next_unerased_page(i)
if page is not None:
self.flash.load_page_buffer(next_buf, page.addr, page.data)
# Wait for the program to complete.
result = self.flash.wait_for_completion()
# check the return code
if result != 0:
logging.error('program_page(0x%x) error: %i', current_addr, result)
error_count += 1
if error_count > self.max_errors:
logging.error("Too many page programming errors, aborting program operation")
break
# Swap buffers.
temp = current_buf
current_buf = next_buf
next_buf = temp
# Update progress.
progress += current_weight
progress_cb(float(progress) / float(self.chip_erase_weight))
progress_cb(1.0)
return FlashBuilder.FLASH_CHIP_ERASE
def _page_erase_program(self, progress_cb=_stub_progress):
"""
Program by performing sector erases.
"""
actual_page_erase_count = 0
actual_page_erase_weight = 0
progress = 0
progress_cb(0.0)
for page in self.page_list:
# If the page is not the same
if page.same is False:
progress += page.get_erase_program_weight()
# Read page data if unknown - after this page.same will be True or False
if page.same is None:
data = self.flash.target.read_memory_block8(page.addr, len(page.data))
page.same = _same(page.data, data)
progress += page.get_verify_weight()
# Program page if not the same
if page.same is False:
self.flash.erase_page(page.addr)
self.flash.program_page(page.addr, page.data)
actual_page_erase_count += 1
actual_page_erase_weight += page.get_erase_program_weight()
# Update progress
if self.page_erase_weight > 0:
progress_cb(float(progress) / float(self.page_erase_weight))
progress_cb(1.0)
logging.debug("Estimated page erase count: %i", self.page_erase_count)
logging.debug("Actual page erase count: %i", actual_page_erase_count)
return FlashBuilder.FLASH_PAGE_ERASE
def _scan_pages_for_same(self, progress_cb=_stub_progress):
"""
Program by performing sector erases.
"""
progress = 0
count = 0
same_count = 0
for page in self.page_list:
# Read page data if unknown - after this page.same will be True or False
if page.same is None:
data = self.flash.target.read_memory_block8(page.addr, len(page.data))
page.same = _same(page.data, data)
progress += page.get_verify_weight()
count += 1
if page.same:
same_count += 1
# Update progress
progress_cb(float(progress) / float(self.page_erase_weight))
return progress
def _next_nonsame_page(self, i):
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
while page.same:
i += 1
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
return page, i + 1
def _page_erase_program_double_buffer(self, progress_cb=_stub_progress):
"""
Program by performing sector erases.
"""
actual_page_erase_count = 0
actual_page_erase_weight = 0
progress = 0
progress_cb(0.0)
# Fill in same flag for all pages. This is done up front so we're not trying
# to read from flash while simultaneously programming it.
progress = self._scan_pages_for_same(progress_cb)
# Set up page and buffer info.
error_count = 0
current_buf = 0
next_buf = 1
page, i = self._next_nonsame_page(0)
# Make sure there are actually pages to program differently from current flash contents.
if page is not None:
# Load first page buffer
self.flash.load_page_buffer(current_buf, page.addr, page.data)
while page is not None:
assert page.same is not None
# Kick off this page program.
current_addr = page.addr
current_weight = page.get_erase_program_weight()
self.flash.erase_page(current_addr)
self.flash.start_program_page_with_buffer(current_buf, current_addr) #, erase_page=True)
actual_page_erase_count += 1
actual_page_erase_weight += page.get_erase_program_weight()
# Get next page and load it.
page, i = self._next_nonsame_page(i)
if page is not None:
self.flash.load_page_buffer(next_buf, page.addr, page.data)
# Wait for the program to complete.
result = self.flash.wait_for_completion()
# check the return code
if result != 0:
logging.error('program_page(0x%x) error: %i', current_addr, result)
error_count += 1
if error_count > self.max_errors:
logging.error("Too many page programming errors, aborting program operation")
break
# Swap buffers.
temp = current_buf
current_buf = next_buf
next_buf = temp
# Update progress
progress += current_weight
if self.page_erase_weight > 0:
progress_cb(float(progress) / float(self.page_erase_weight))
progress_cb(1.0)
logging.debug("Estimated page erase count: %i", self.page_erase_count)
logging.debug("Actual page erase count: %i", actual_page_erase_count)
return FlashBuilder.FLASH_PAGE_ERASE
|
py | 1a3e6e09ac6889c34213b0b23ac48c7b6811f04c | # Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Off-Policy Evaluation Class to Streamline OPE."""
from dataclasses import dataclass
from logging import getLogger
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
from pandas import DataFrame
import seaborn as sns
from sklearn.utils import check_scalar
from ..types import BanditFeedback
from ..utils import check_array
from ..utils import check_confidence_interval_arguments
from .estimators_continuous import BaseContinuousOffPolicyEstimator
from .estimators_continuous import KernelizedDoublyRobust as KDR
logger = getLogger(__name__)
@dataclass
class ContinuousOffPolicyEvaluation:
"""Class to conduct OPE using multiple estimators simultaneously.
Parameters
-----------
bandit_feedback: BanditFeedback
Logged bandit feedback data with continuous actions used to conduct OPE.
ope_estimators: List[BaseOffPolicyEstimator]
List of OPE estimators used to evaluate the policy value of evaluation policy.
Estimators must follow the interface of `obp.ope.BaseContinuousOffPolicyEstimator`.
Examples
----------
.. code-block:: python
        # a case of implementing OPE (with continuous actions) of a synthetic evaluation policy
>>> from obp.dataset import (
SyntheticContinuousBanditDataset,
linear_reward_funcion_continuous,
linear_behavior_policy_continuous,
linear_synthetic_policy_continuous
)
>>> from obp.ope import (
ContinuousOffPolicyEvaluation,
KernelizedInverseProbabilityWeighting as KernelizedIPW
)
# (1) Synthetic Data Generation
>>> dataset = SyntheticContinuousBanditDataset(
dim_context=5,
reward_function=linear_reward_funcion_continuous,
behavior_policy_function=linear_behavior_policy_continuous,
random_state=12345,
)
>>> bandit_feedback = dataset.obtain_batch_bandit_feedback(
n_rounds=10000, min_action_value=-10, max_action_value=10,
)
# (2) Synthetic Evaluation Policy
>>> action_by_evaluation_policy = linear_synthetic_policy_continuous(
context=bandit_feedback["context"]
)
# (3) Off-Policy Evaluation
>>> ope = ContinuousOffPolicyEvaluation(
bandit_feedback=bandit_feedback,
ope_estimators=[KernelizedIPW(kernel="epanechnikov", bandwidth=0.02)]
)
>>> estimated_policy_value = ope.estimate_policy_values(
action_by_evaluation_policy=action_by_evaluation_policy,
)
>>> estimated_policy_value
{'kernelized_ipw': 2.2858905015106723}
# (4) Ground-truth Policy Value of the Synthetic Evaluation Policy
>>> dataset.calc_ground_truth_policy_value(
context=bandit_feedback["context"], action=action_by_evaluation_policy
)
2.2893029243895215
"""
bandit_feedback: BanditFeedback
ope_estimators: List[BaseContinuousOffPolicyEstimator]
def __post_init__(self) -> None:
"""Initialize class."""
for key_ in ["action", "reward", "pscore"]:
if key_ not in self.bandit_feedback:
raise RuntimeError(f"Missing key of {key_} in 'bandit_feedback'.")
self.bandit_feedback["action_by_behavior_policy"] = self.bandit_feedback[
"action"
]
self.ope_estimators_ = dict()
self.is_model_dependent = False
for estimator in self.ope_estimators:
self.ope_estimators_[estimator.estimator_name] = estimator
if isinstance(estimator, KDR):
self.is_model_dependent = True
def _create_estimator_inputs(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
) -> Dict[str, Dict[str, np.ndarray]]:
"""Create input dictionary to estimate policy value by subclasses of `BaseOffPolicyEstimator`"""
check_array(
array=action_by_evaluation_policy,
name="action_by_evaluation_policy",
expected_dim=1,
)
if estimated_rewards_by_reg_model is None:
pass
elif isinstance(estimated_rewards_by_reg_model, dict):
for estimator_name, value in estimated_rewards_by_reg_model.items():
check_array(
array=value,
name=f"estimated_rewards_by_reg_model[{estimator_name}]",
expected_dim=1,
)
if value.shape != action_by_evaluation_policy.shape:
raise ValueError(
f"Expected `estimated_rewards_by_reg_model[{estimator_name}].shape == action_by_evaluation_policy.shape`, but found it False"
)
elif estimated_rewards_by_reg_model.shape != action_by_evaluation_policy.shape:
raise ValueError(
"Expected `estimated_rewards_by_reg_model.shape == action_by_evaluation_policy.shape`, but found it False"
)
estimator_inputs = {
estimator_name: {
input_: self.bandit_feedback[input_]
for input_ in ["reward", "action_by_behavior_policy", "pscore"]
}
for estimator_name in self.ope_estimators_
}
for estimator_name in self.ope_estimators_:
estimator_inputs[estimator_name][
"action_by_evaluation_policy"
] = action_by_evaluation_policy
if isinstance(estimated_rewards_by_reg_model, dict):
if estimator_name in estimated_rewards_by_reg_model:
estimator_inputs[estimator_name][
"estimated_rewards_by_reg_model"
] = estimated_rewards_by_reg_model[estimator_name]
else:
estimator_inputs[estimator_name][
"estimated_rewards_by_reg_model"
] = None
else:
estimator_inputs[estimator_name][
"estimated_rewards_by_reg_model"
] = estimated_rewards_by_reg_model
return estimator_inputs
def estimate_policy_values(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
) -> Dict[str, float]:
"""Estimate policy value of evaluation policy.
Parameters
------------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
Returns
----------
policy_value_dict: Dict[str, float]
Dictionary containing estimated policy values by OPE estimators.
"""
if self.is_model_dependent:
if estimated_rewards_by_reg_model is None:
raise ValueError(
"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given"
)
policy_value_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
policy_value_dict[estimator_name] = estimator.estimate_policy_value(
**estimator_inputs[estimator_name]
)
return policy_value_dict
def estimate_intervals(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
) -> Dict[str, Dict[str, float]]:
"""Estimate confidence intervals of policy values by nonparametric bootstrap procedure.
Parameters
------------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
Returns
----------
policy_value_interval_dict: Dict[str, Dict[str, float]]
Dictionary containing confidence intervals of estimated policy value estimated
using nonparametric bootstrap procedure.
"""
if self.is_model_dependent:
if estimated_rewards_by_reg_model is None:
raise ValueError(
"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given"
)
check_confidence_interval_arguments(
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
policy_value_interval_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
policy_value_interval_dict[estimator_name] = estimator.estimate_interval(
**estimator_inputs[estimator_name],
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
return policy_value_interval_dict
def summarize_off_policy_estimates(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
) -> Tuple[DataFrame, DataFrame]:
"""Summarize policy values and their confidence intervals estimated by OPE estimators.
Parameters
------------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
Returns
----------
(policy_value_df, policy_value_interval_df): Tuple[DataFrame, DataFrame]
Policy values and their confidence intervals estimated by OPE estimators.
"""
policy_value_df = DataFrame(
self.estimate_policy_values(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
),
index=["estimated_policy_value"],
)
policy_value_interval_df = DataFrame(
self.estimate_intervals(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
)
policy_value_of_behavior_policy = self.bandit_feedback["reward"].mean()
policy_value_df = policy_value_df.T
if policy_value_of_behavior_policy <= 0:
logger.warning(
f"Policy value of the behavior policy is {policy_value_of_behavior_policy} (<=0); relative estimated policy value is set to np.nan"
)
policy_value_df["relative_estimated_policy_value"] = np.nan
else:
policy_value_df["relative_estimated_policy_value"] = (
policy_value_df.estimated_policy_value / policy_value_of_behavior_policy
)
return policy_value_df, policy_value_interval_df.T
def visualize_off_policy_estimates(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
is_relative: bool = False,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
fig_dir: Optional[Path] = None,
fig_name: str = "estimated_policy_value.png",
) -> None:
"""Visualize policy values estimated by OPE estimators.
Parameters
----------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
is_relative: bool, default=False,
If True, the method visualizes the estimated policy values of evaluation policy
relative to the ground-truth policy value of behavior policy.
fig_dir: Path, default=None
Path to store the bar figure.
If 'None' is given, the figure will not be saved.
fig_name: str, default="estimated_policy_value.png"
Name of the bar figure.
"""
if fig_dir is not None:
assert isinstance(fig_dir, Path), "fig_dir must be a Path"
if fig_name is not None:
assert isinstance(fig_name, str), "fig_name must be a string"
estimated_round_rewards_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
estimated_round_rewards_dict[
estimator_name
] = estimator._estimate_round_rewards(**estimator_inputs[estimator_name])
estimated_round_rewards_df = DataFrame(estimated_round_rewards_dict)
estimated_round_rewards_df.rename(
columns={key: key.upper() for key in estimated_round_rewards_dict.keys()},
inplace=True,
)
if is_relative:
estimated_round_rewards_df /= self.bandit_feedback["reward"].mean()
plt.style.use("ggplot")
fig, ax = plt.subplots(figsize=(8, 6))
sns.barplot(
data=estimated_round_rewards_df,
ax=ax,
ci=100 * (1 - alpha),
n_boot=n_bootstrap_samples,
seed=random_state,
)
plt.xlabel("OPE Estimators", fontsize=25)
plt.ylabel(
f"Estimated Policy Value (± {np.int32(100*(1 - alpha))}% CI)", fontsize=20
)
plt.yticks(fontsize=15)
plt.xticks(fontsize=25 - 2 * len(self.ope_estimators))
if fig_dir:
fig.savefig(str(fig_dir / fig_name))
def evaluate_performance_of_estimators(
self,
ground_truth_policy_value: float,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
metric: str = "relative-ee",
) -> Dict[str, float]:
"""Evaluate estimation performance of OPE estimators.
Note
------
Evaluate the estimation performance of OPE estimators by relative estimation error (relative-EE) or squared error (SE):
.. math ::
\\text{Relative-EE} (\\hat{V}; \\mathcal{D}) = \\left| \\frac{\\hat{V}(\\pi; \\mathcal{D}) - V(\\pi)}{V(\\pi)} \\right|,
.. math ::
\\text{SE} (\\hat{V}; \\mathcal{D}) = \\left(\\hat{V}(\\pi; \\mathcal{D}) - V(\\pi) \\right)^2,
where :math:`V({\\pi})` is the ground-truth policy value of the evaluation policy :math:`\\pi_e` (often estimated using on-policy estimation).
:math:`\\hat{V}(\\pi; \\mathcal{D})` is an estimated policy value by an OPE estimator :math:`\\hat{V}` and logged bandit feedback :math:`\\mathcal{D}`.
Parameters
----------
ground_truth_policy_value: float
Ground-truth policy value of the evaluation policy, i.e., :math:`V(\\pi)`.
With Open Bandit Dataset, we use an on-policy estimate of the policy value as its ground-truth.
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
metric: str, default="relative-ee"
Evaluation metric to evaluate and compare the estimation performance of OPE estimators.
Must be "relative-ee" or "se".
Returns
----------
eval_metric_ope_dict: Dict[str, float]
Dictionary containing evaluation metric for evaluating the estimation performance of OPE estimators.
"""
check_scalar(
ground_truth_policy_value,
"ground_truth_policy_value",
float,
)
if metric not in ["relative-ee", "se"]:
raise ValueError(
f"metric must be either 'relative-ee' or 'se', but {metric} is given"
)
if metric == "relative-ee" and ground_truth_policy_value == 0.0:
raise ValueError(
"ground_truth_policy_value must be non-zero when metric is relative-ee"
)
eval_metric_ope_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
estimated_policy_value = estimator.estimate_policy_value(
**estimator_inputs[estimator_name]
)
if metric == "relative-ee":
relative_ee_ = estimated_policy_value - ground_truth_policy_value
relative_ee_ /= ground_truth_policy_value
eval_metric_ope_dict[estimator_name] = np.abs(relative_ee_)
elif metric == "se":
se_ = (estimated_policy_value - ground_truth_policy_value) ** 2
eval_metric_ope_dict[estimator_name] = se_
return eval_metric_ope_dict
def summarize_estimators_comparison(
self,
ground_truth_policy_value: float,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
metric: str = "relative-ee",
) -> DataFrame:
"""Summarize performance comparison of OPE estimators.
Parameters
----------
ground_truth_policy_value: float
Ground-truth policy value of the evaluation policy, i.e., :math:`V(\\pi)`.
With Open Bandit Dataset, we use an on-policy estimate of the policy value as ground-truth.
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
metric: str, default="relative-ee"
Evaluation metric to evaluate and compare the estimation performance of OPE estimators.
Must be either "relative-ee" or "se".
Returns
----------
eval_metric_ope_df: DataFrame
Evaluation metric to evaluate and compare the estimation performance of OPE estimators.
"""
eval_metric_ope_df = DataFrame(
self.evaluate_performance_of_estimators(
ground_truth_policy_value=ground_truth_policy_value,
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
metric=metric,
),
index=[metric],
)
return eval_metric_ope_df.T
def visualize_off_policy_estimates_of_multiple_policies(
self,
policy_name_list: List[str],
action_by_evaluation_policy_list: List[np.ndarray],
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
is_relative: bool = False,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
fig_dir: Optional[Path] = None,
fig_name: str = "estimated_policy_value.png",
) -> None:
"""Visualize policy values estimated by OPE estimators.
Parameters
----------
policy_name_list: List[str]
List of the names of evaluation policies.
action_by_evaluation_policy_list: List[array-like], each of shape (n_rounds,)
List of continuous action values given by the (deterministic) evaluation policies, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resamplings performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
is_relative: bool, default=False,
If True, the method visualizes the estimated policy values of evaluation policy
relative to the ground-truth policy value of behavior policy.
fig_dir: Path, default=None
Path to store the bar figure.
If 'None' is given, the figure will not be saved.
fig_name: str, default="estimated_policy_value.png"
Name of the bar figure.
"""
if len(policy_name_list) != len(action_by_evaluation_policy_list):
raise ValueError(
"the length of policy_name_list must be the same as action_by_evaluation_policy_list"
)
if fig_dir is not None:
assert isinstance(fig_dir, Path), "fig_dir must be a Path"
if fig_name is not None:
assert isinstance(fig_name, str), "fig_name must be a string"
estimated_round_rewards_dict = {
estimator_name: {} for estimator_name in self.ope_estimators_
}
for policy_name, action_by_evaluation_policy in zip(
policy_name_list, action_by_evaluation_policy_list
):
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
estimated_round_rewards_dict[estimator_name][
policy_name
] = estimator._estimate_round_rewards(
**estimator_inputs[estimator_name]
)
plt.style.use("ggplot")
fig = plt.figure(figsize=(8, 6.2 * len(self.ope_estimators_)))
for i, estimator_name in enumerate(self.ope_estimators_):
estimated_round_rewards_df = DataFrame(
estimated_round_rewards_dict[estimator_name]
)
if is_relative:
estimated_round_rewards_df /= self.bandit_feedback["reward"].mean()
ax = fig.add_subplot(len(self.ope_estimators_), 1, i + 1)
sns.barplot(
data=estimated_round_rewards_df,
ax=ax,
ci=100 * (1 - alpha),
n_boot=n_bootstrap_samples,
seed=random_state,
)
ax.set_title(estimator_name.upper(), fontsize=20)
ax.set_ylabel(
f"Estimated Policy Value (± {np.int32(100*(1 - alpha))}% CI)",
fontsize=20,
)
plt.yticks(fontsize=15)
plt.xticks(fontsize=25 - 2 * len(policy_name_list))
if fig_dir:
fig.savefig(str(fig_dir / fig_name))
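# --------------------------------------------------------------------------
# Minimal usage sketch (not part of the library code): assumes `ope` is an
# already-constructed instance of this class, and that `pi_e_actions` and
# `q_hat` are hypothetical arrays with the shapes documented above.
#
#   ope.visualize_off_policy_estimates(
#       action_by_evaluation_policy=pi_e_actions,   # shape (n_rounds,)
#       estimated_rewards_by_reg_model=q_hat,       # optional \hat{q}(x_t, a_t)
#       is_relative=True,
#   )
#   eval_df = ope.summarize_estimators_comparison(
#       ground_truth_policy_value=0.05,             # hypothetical on-policy estimate
#       action_by_evaluation_policy=pi_e_actions,
#       metric="se",
#   )
# --------------------------------------------------------------------------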
|
py | 1a3e6e549b7b08b02259b923fe4b2596657d141e | #!/usr/bin/python
import os
import sys
import requests
import certifi
import tarfile
import subprocess
# We are downloading a specific version of the Gcloud SDK because we have not
# found a URL to fetch the "latest" version.
# The installation updates the SDK so there is no need to update the downloaded
# version too often.
# If it is needed to update the downloaded version please refer to:
# https://cloud.google.com/sdk/downloads#versioned
GCLOUD_DOWNLOAD_URL = 'https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/'
GCLOUD_SDK_TAR_FILE = 'google-cloud-sdk-138.0.0-linux-x86_64.tar.gz'
GCLOUD_SDK_INSTALL_FOLDER = 'google-cloud-sdk'
TEMP_DIR = 'temp'
GCLOUD_SDK_PATH = os.path.join(TEMP_DIR, GCLOUD_SDK_INSTALL_FOLDER)
def _Download(url, to):
print 'Downloading %s to %s...' % (url, to)
response = requests.get(url, stream=True)
if response.status_code == 200:
with open(to, 'wb') as to_file:
for chunk in response.iter_content(chunk_size=1024):
to_file.write(chunk)
else:
raise NameError('Could not download: %s Error: %s' % (to,
str(response.status_code)))
def _Extract(file_to_extract_path, destination_path):
print 'Extracting %s in %s...' % (file_to_extract_path, destination_path)
with tarfile.open(file_to_extract_path, 'r:gz') as tar_file:
tar_file.extractall(destination_path)
def _EnsureAppEngineIsInstalled(path_to_gcloud_sdk):
gcloud_exec = os.path.join(path_to_gcloud_sdk, 'bin', 'gcloud')
subprocess.call([gcloud_exec, '--quiet',
'components', 'install', 'app-engine-python'])
subprocess.call([gcloud_exec, '--quiet',
'components', 'update'])
def _Cleanup(file_paths_to_remove):
for file_path in file_paths_to_remove:
if os.path.exists(file_path):
print 'Cleaning up %s' % file_path
os.remove(file_path)
def main():
if not os.path.exists(TEMP_DIR):
os.mkdir(TEMP_DIR)
if os.path.isfile(os.path.join(GCLOUD_SDK_PATH, 'bin', 'gcloud')):
print 'Already has %s, skipping the download' % GCLOUD_SDK_INSTALL_FOLDER
_EnsureAppEngineIsInstalled(GCLOUD_SDK_PATH)
_Cleanup([os.path.join(TEMP_DIR, GCLOUD_SDK_TAR_FILE)])
return
_Download(GCLOUD_DOWNLOAD_URL + GCLOUD_SDK_TAR_FILE,
os.path.join(TEMP_DIR, GCLOUD_SDK_TAR_FILE))
_Extract(os.path.join(TEMP_DIR, GCLOUD_SDK_TAR_FILE), TEMP_DIR)
_EnsureAppEngineIsInstalled(GCLOUD_SDK_PATH)
_Cleanup([os.path.join(TEMP_DIR, GCLOUD_SDK_TAR_FILE)])
if __name__ == "__main__":
sys.exit(main())
|
py | 1a3e6ffe74a8d0777c21d956a81b98acc8b8961b | from khal.terminal import colored, merge_columns
def test_colored():
assert colored('test', 'light cyan') == '\33[1;36mtest\x1b[0m'
assert colored('täst', 'white') == '\33[37mtäst\x1b[0m'
assert colored('täst', 'white', 'dark green') == '\x1b[37m\x1b[42mtäst\x1b[0m'
assert colored('täst', 'light magenta', 'dark green', True) == '\x1b[1;35m\x1b[42mtäst\x1b[0m'
assert colored('täst', 'light magenta', 'dark green', False) == '\x1b[95m\x1b[42mtäst\x1b[0m'
assert colored('täst', 'light magenta', 'light green', True) == '\x1b[1;35m\x1b[42mtäst\x1b[0m'
assert colored('täst', 'light magenta', 'light green', False) == '\x1b[95m\x1b[102mtäst\x1b[0m'
assert colored('täst', '5', '20') == '\x1b[38;5;5m\x1b[48;5;20mtäst\x1b[0m'
assert colored('täst', '#F0F', '#00AABB') == \
'\x1b[38;2;255;0;255m\x1b[48;2;0;170;187mtäst\x1b[0m'
class TestMergeColumns:
def test_longer_right(self):
left = ['uiae', 'nrtd']
right = ['123456', '234567', '345678']
out = ['uiae 123456',
'nrtd 234567',
' 345678']
assert merge_columns(left, right, width=4) == out
def test_longer_left(self):
left = ['uiae', 'nrtd', 'xvlc']
right = ['123456', '234567']
out = ['uiae 123456', 'nrtd 234567', 'xvlc ']
assert merge_columns(left, right, width=4) == out
|
py | 1a3e70167f479f3445c63124b264972a43faf868 | from __future__ import print_function
from builtins import range
import json
class selectionParser(object):
def __init__(self,selectStr):
self.__result={}
self.__strresult={}
strresult=json.loads(selectStr)
for k,v in strresult.items():
expandedvalues=[]
for w in v:
if len(w)==0:
self.__result[int(k)]=expandedvalues
self.__strresult[k]=[]
continue
###weed out [10]-like stuff just in case they exist
elif len(w)==1:
expandedvalues.append(w[0])
##weed out [10,10]-like stuff
elif len(w)==2 and w[0]==w[1]:
expandedvalues.append(w[0])
else:
for i in range(w[0],w[1]+1):
expandedvalues.append(i)
self.__result[int(k)]=expandedvalues
self.__strresult[k]=[str(x) for x in expandedvalues]
def runs(self):
return self.__result.keys()
def runsandls(self):
'''return expanded {run:lslist}
'''
return self.__result
def runsandlsStr(self):
'''return expanded {'run':lslist}
'''
return self.__strresult
def numruns(self):
return len(self.__result)
def numls(self,run):
return len(self.__result[run])
if __name__ == "__main__":
s=selectionParser('{"1":[[3,45]],"2":[[4,8],[10,10]],"3":[[]]}')
print('runs : ',s.runs())
print('full result : ',s.runsandls())
print('str result : ',s.runsandlsStr())
print('num runs : ',s.numruns())
print('numls in run : ',s.numls(1))
|
py | 1a3e7155e3565477c793a55fc441069986ac9f42 | from .analyzerException import AnalyzerException
|
py | 1a3e71f0b9022b4f64a7382771d016399f467bf0 | from __future__ import division, generators, print_function
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable as Var
import sys
import macarico.util
macarico.util.reseed()
from macarico.annealing import ExponentialAnnealing, stochastic
from macarico.lts.lols import BanditLOLS
from macarico.lts.reslope import Reslope
from macarico.lts.aggrevate import AggreVaTe
from macarico.tasks.sequence_labeler import Example, HammingLoss, HammingLossReference
from macarico.features.sequence import RNNFeatures, AttendAt
from macarico.features.actor import TransitionRNN
from macarico.policies.linear import LinearPolicy
from macarico.policies.bootstrap import BootstrapPolicy
def test1(use_bootstrap):
n_types = 10
n_labels = 4
print()
print('# test sequence labeler on mod data with Reslope and', ('bootstrap' if use_bootstrap else 'boltzmann'), 'exploration')
data = macarico.util.make_sequence_mod_data(3000, 6, n_types, n_labels)
data = [Example(x, y, n_labels) for x, y in data]
if not use_bootstrap:
tRNN = TransitionRNN([RNNFeatures(n_types)], [AttendAt()], n_labels)
policy = LinearPolicy(tRNN, n_labels)
else:
rnns = [TransitionRNN([RNNFeatures(n_types)], [AttendAt()], n_labels, h_name='h%d' % i)
for i in range(5)]
policy = BootstrapPolicy(rnns, n_labels)
optimizer = torch.optim.Adam(policy.parameters(), lr=0.01)
p_ref = stochastic(ExponentialAnnealing(0.9))
macarico.util.trainloop(
training_data = data[:2048],
dev_data = data[2048:],
policy = policy,
Learner = lambda: Reslope(HammingLossReference(), policy, p_ref,
exploration=BanditLOLS.EXPLORE_BOOTSTRAP if use_bootstrap else \
BanditLOLS.EXPLORE_BOLTZMANN
),
losses = HammingLoss(),
optimizer = optimizer,
run_per_epoch = [p_ref.step],
train_eval_skip = 1,
bandit_evaluation = True,
n_epochs = 1,
)
if __name__ == '__main__':
test1(False)
test1(True)
|
py | 1a3e7241339547102485b82d351f93ccb35b2c93 | # Solution to Problem 0004
def solution():
palindromes = [0]
for i in range(100,1000):
for j in range(100,1000):
if (str(i*j) == str(i*j)[::-1]) and (i*j > palindromes[-1]):
palindromes.append(i*j)
return palindromes[-1]
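# For reference: with two 3-digit factors this brute-force search returns
# 906609 (= 913 * 993), the largest palindromic product.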
if __name__ == "__main__":
print(solution()) |
py | 1a3e72b6d38e8ca1a4f51575571202ba875ba808 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import os.path as osp
import os
from PIL import Image
import numpy as np
import json
from transform import *
class CityScapes(Dataset):
def __init__(self, rootpth, cropsize=(640, 480), mode='train',
randomscale=(0.125, 0.25, 0.375, 0.5, 0.675, 0.75, 0.875, 1.0, 1.25, 1.5), *args, **kwargs):
super(CityScapes, self).__init__(*args, **kwargs)
assert mode in ('train', 'val', 'test', 'trainval')
self.mode = mode
print('self.mode', self.mode)
self.ignore_lb = 255
with open('./cityscapes_info.json', 'r') as fr:
labels_info = json.load(fr)
self.lb_map = {el['id']: el['trainId'] for el in labels_info}  # Cityscapes annotates ~35 label ids, but only 19 train classes are used for evaluation.
## parse img directory
self.imgs = {}
imgnames = []
impth = osp.join(rootpth, 'leftImg8bit', mode)
folders = os.listdir(impth)
for fd in folders:
fdpth = osp.join(impth, fd)
im_names = os.listdir(fdpth)
names = [el.replace('_leftImg8bit.png', '') for el in im_names]
impths = [osp.join(fdpth, el) for el in im_names]
imgnames.extend(names)
self.imgs.update(dict(zip(names, impths)))
## parse gt directory
self.labels = {}
gtnames = []
gtpth = osp.join(rootpth, 'gtFine', mode)
folders = os.listdir(gtpth)
for fd in folders:
fdpth = osp.join(gtpth, fd)
lbnames = os.listdir(fdpth)
lbnames = [el for el in lbnames if 'labelIds' in el]
names = [el.replace('_gtFine_labelIds.png', '') for el in lbnames]
lbpths = [osp.join(fdpth, el) for el in lbnames]
gtnames.extend(names)
self.labels.update(dict(zip(names, lbpths)))
self.imnames = imgnames
self.len = len(self.imnames)
print('self.len', self.mode, self.len)
assert set(imgnames) == set(gtnames)
assert set(self.imnames) == set(self.imgs.keys())
assert set(self.imnames) == set(self.labels.keys())
## pre-processing
self.to_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
self.trans_train = Compose([
ColorJitter(
brightness = 0.5,
contrast = 0.5,
saturation = 0.5),
HorizontalFlip(),
# RandomScale((0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0)),
RandomScale(randomscale),
# RandomScale((0.125, 1)),
# RandomScale((0.125, 0.25, 0.375, 0.5, 0.675, 0.75, 0.875, 1.0)),
# RandomScale((0.125, 0.25, 0.375, 0.5, 0.675, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5)),
RandomCrop(cropsize)
])
def __getitem__(self, idx):
fn = self.imnames[idx]
impth = self.imgs[fn]
lbpth = self.labels[fn]
img = Image.open(impth).convert('RGB')
label = Image.open(lbpth)
if self.mode == 'train' or self.mode == 'trainval':
im_lb = dict(im = img, lb = label)
im_lb = self.trans_train(im_lb)
img, label = im_lb['im'], im_lb['lb']
img = self.to_tensor(img)
label = np.array(label).astype(np.int64)[np.newaxis, :]
label = self.convert_labels(label)
return img, label
def __len__(self):
return self.len
def convert_labels(self, label):
for k, v in self.lb_map.items():
label[label == k] = v
return label
if __name__ == "__main__":
from tqdm import tqdm
ds = CityScapes('./data/', mode='val')
uni = []
for im, lb in tqdm(ds):
lb_uni = np.unique(lb).tolist()
uni.extend(lb_uni)
print(uni)
print(set(uni))
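# --------------------------------------------------------------------------
# Minimal training-loader sketch (an assumption, not part of the original
# test code): the dataset above plugs directly into torch's DataLoader; the
# crop size, batch size and worker count below are illustrative only.
#
#   from torch.utils.data import DataLoader
#   ds_train = CityScapes('./data/', cropsize=(1024, 512), mode='train')
#   dl_train = DataLoader(ds_train, batch_size=8, shuffle=True,
#                         num_workers=4, pin_memory=True, drop_last=True)
#   for img, label in dl_train:
#       # img: (B, 3, H, W) float tensor; label: (B, 1, H, W) int64 tensor
#       pass
# --------------------------------------------------------------------------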
|
py | 1a3e743854781e73bd721a3b90b88ea2b8b861d7 | import pytest
import json
from bitarray import bitarray
from bigsi.tests.base import CONFIGS
from bigsi import BIGSI
from bigsi.storage import get_storage
from bigsi.utils import seq_to_kmers
def test_create():
for config in CONFIGS:
get_storage(config).delete_all()
bloomfilters = [BIGSI.bloom(config, ["ATC", "ATA"])]
samples = ["1"]
bigsi = BIGSI.build(config, bloomfilters, samples)
assert bigsi.kmer_size == 3
assert bigsi.bloomfilter_size == 1000
assert bigsi.num_hashes == 3
assert bigsi.num_samples == 1
assert bigsi.lookup("ATC") == {"ATC": bitarray("1")}
assert bigsi.colour_to_sample(0) == "1"
assert bigsi.sample_to_colour("1") == 0
bigsi.delete()
def test_insert():
for config in CONFIGS:
get_storage(config).delete_all()
bloomfilters = [BIGSI.bloom(config, ["ATC", "ATA"])]
samples = ["1"]
bigsi = BIGSI.build(config, bloomfilters, samples)
bloomfilter_2 = BIGSI.bloom(config, ["ATC", "ATT"])
bigsi.insert(bloomfilter_2, "2")
assert bigsi.kmer_size == 3
assert bigsi.bloomfilter_size == 1000
assert bigsi.num_hashes == 3
assert bigsi.num_samples == 2
assert bigsi.lookup(["ATC", "ATA", "ATT"]) == {
"ATC": bitarray("11"),
"ATA": bitarray("10"),
"ATT": bitarray("01"),
}
assert bigsi.colour_to_sample(0) == "1"
assert bigsi.sample_to_colour("1") == 0
assert bigsi.colour_to_sample(1) == "2"
assert bigsi.sample_to_colour("2") == 1
bigsi.delete()
def test_unique_sample_names():
for config in CONFIGS:
get_storage(config).delete_all()
bloom = BIGSI.bloom(config, ["ATC", "ATA"])
bigsi = BIGSI.build(config, [bloom], ["1"])
with pytest.raises(ValueError):
bigsi.insert(bloom, "1")
assert bigsi.num_samples == 1
assert bigsi.lookup(["ATC", "ATA", "ATT"]) == {
"ATC": bitarray("1"),
"ATA": bitarray("1"),
"ATT": bitarray("0"),
}
bigsi.delete()
def test_exact_search():
config = CONFIGS[0]
kmers_1 = seq_to_kmers("ATACACAAT", config["k"])
kmers_2 = seq_to_kmers("ACAGAGAAC", config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
for config in CONFIGS:
get_storage(config).delete_all()
bigsi = BIGSI.build(config, [bloom1, bloom2], ["a", "b"])
assert bigsi.search("ATACACAAT")[0] == {
"percent_kmers_found": 100,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "a",
}
assert bigsi.search("ACAGAGAAC")[0] == {
"percent_kmers_found": 100,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "b",
}
assert bigsi.search("ACAGTTAAC") == []
bigsi.delete()
@pytest.mark.skip(
reason="Passes in isolation, but fails when run with the rest of the tests"
)
def test_inexact_search():
for config in CONFIGS:
get_storage(config).delete_all()
config = CONFIGS[0]
kmers_1 = seq_to_kmers("ATACACAAT", config["k"])
kmers_2 = seq_to_kmers("ATACACAAC", config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
for config in CONFIGS:
get_storage(config).delete_all()
with pytest.raises(BaseException):
BIGSI(config)
bigsi = BIGSI.build(config, [bloom1, bloom2], ["a", "b"])
assert bigsi.search("ACAGTTAAC", 0.5) == []
assert bigsi.lookup("AAT") == {"AAT": bitarray("10")}
results = bigsi.search("ATACACAAT", 0.5)
assert results[0] == {
"percent_kmers_found": 100.0,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "a",
}
assert (
json.dumps(results[0])
== '{"percent_kmers_found": 100.0, "num_kmers": 6, "num_kmers_found": 6, "sample_name": "a"}'
)
assert results[1] == {
"percent_kmers_found": 83.33,
"num_kmers": 6,
"num_kmers_found": 5,
"sample_name": "b",
}
bigsi.delete()
def test_search_concordance():
config = CONFIGS[0]
seq_a = "ATACACAAT"
seq_b = "ATACACAAC"
kmers_1 = seq_to_kmers(seq_a, config["k"])
kmers_2 = seq_to_kmers(seq_b, config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
for config in CONFIGS:
get_storage(config).delete_all()
bigsi = BIGSI.build(config, [bloom1, bloom2], ["a", "b"])
exp_result_a = {
"percent_kmers_found": 100.0,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "a"
}
inexact_results_a = sorted(bigsi.search(seq_a, 0.5),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(inexact_results_a) == 2
assert inexact_results_a[0] == exp_result_a
exact_results_a = sorted(bigsi.search(seq_a, 1.0),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(exact_results_a) == 1
assert exact_results_a[0] == exp_result_a
exp_result_b = {
"percent_kmers_found": 100.0,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "b"
}
inexact_results_b = sorted(bigsi.search(seq_b, 0.5),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(inexact_results_b) == 2
assert inexact_results_b[0] == exp_result_b
exact_results_b = sorted(bigsi.search(seq_b, 1.0),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(exact_results_b) == 1
assert exact_results_b[0] == exp_result_b
bigsi.delete()
##
@pytest.mark.skip(reason="TODO, fix test to work on single config")
def test_merge():
for config in CONFIGS:
get_storage(config).delete_all()
config = CONFIGS[0]
kmers_1 = seq_to_kmers("ATACACAAT", config["k"])
kmers_2 = seq_to_kmers("ATACACAAC", config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
bigsi1 = BIGSI.build(CONFIGS[0], [bloom1], ["a"])
bigsi2 = BIGSI.build(CONFIGS[1], [bloom2], ["b"])
bigsic = BIGSI.build(CONFIGS[2], [bloom1, bloom2], ["a", "b"])
bigsi1.merge(bigsi2)
assert bigsi1.search("ATACACAAT", 0.5) == bigsic.search("ATACACAAT", 0.5)
bigsi1.delete()
bigsi2.delete()
bigsic.delete()
|
py | 1a3e74dae489513193afc11ee0ac58eeae569d89 | import pygame
from consts import RED, GREEN
class Player():
def __init__(self, x, y, name, width=100, height=100, color=RED):
self.x = x
self.y = y
self.width = width
self.height = height
self.color = color
self.name = name
self.update_rect()
self.vel = 3
def draw(self, win):
pygame.draw.rect(win, self.color, self.rect)
def move(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.x -= self.vel
if keys[pygame.K_RIGHT]:
self.x += self.vel
if keys[pygame.K_UP]:
self.y -= self.vel
if keys[pygame.K_DOWN]:
self.y += self.vel
self.update_rect()
def update_rect(self):
self.rect = (self.x, self.y, self.width, self.height)
def set_color(self, c):
self.color = c
class Game:
def __init__(self, id):
self.players = {}
self.id_count = 0
self.id = id
def add_player(self, current_player: int):
self.players[current_player] = Player(50, 50, str(current_player))
def get_player(self, player_id: int) -> Player:
return self.players[player_id]
def action(self, player_id: int, data: Player):
self.players[player_id] = data
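# --------------------------------------------------------------------------
# Minimal local game-loop sketch (an assumption, not part of the original
# file): the 500x500 window size and 60 FPS cap are illustrative only.
#
#   pygame.init()
#   win = pygame.display.set_mode((500, 500))
#   clock = pygame.time.Clock()
#   game = Game(id=0)
#   game.add_player(0)
#   player = game.get_player(0)
#   running = True
#   while running:
#       clock.tick(60)
#       for event in pygame.event.get():
#           if event.type == pygame.QUIT:
#               running = False
#       player.move()
#       win.fill((0, 0, 0))
#       player.draw(win)
#       pygame.display.update()
#   pygame.quit()
# --------------------------------------------------------------------------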
|
py | 1a3e75615984e6ce1360046da51f42bdcd12649d | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity
from projects.SimCSE.utils.load_huggingface_weight import load_huggingface_bert
from .bert_for_simcse import BertForSimCSE
class Simcse_sup(nn.Module):
def __init__(self, cfg):
super().__init__()
self.bert = BertForSimCSE(cfg)
self.mlp = MLPLayer(cfg)
self.pooler_type = cfg.pooler_type
if cfg.pretrained_model_weight is not None:
load_huggingface_bert(
self.bert,
cfg.pretrained_model_weight,
cfg["hidden_size"],
cfg["num_attention_heads"],
cfg["hidden_layers"],
)
def pooler(self, inputs, attention_mask):
if self.pooler_type == "cls":
return inputs[0][:, 0]
elif self.pooler_type == "pooled":
return inputs[1]
elif self.pooler_type == "last-avg":
last_hidden = inputs[0]
return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(
-1
).unsqueeze(-1)
elif self.pooler_type == "first-last-avg":
first_hidden = inputs[2][1]
last_hidden = inputs[0]
res = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
1
) / attention_mask.sum(-1).unsqueeze(-1)
return res
def create_use_row(self, labels):
count = 0
use_row = []
for row in range(labels.size(0)):
if count % 2 == 0 and count != 0:
count = 0
continue
use_row.append(row)
count += 1
return flow.tensor(use_row, sbp=labels.sbp, placement=labels.placement)
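# Worked example of the label construction used in forward() below (added
# comment, not original code): with batch size 2 the flattened rows are
# [a0, a0+, a0-, a1, a1+, a1-] with indices 0..5. create_use_row keeps the
# anchor and positive of each triple, i.e. use_row = [0, 1, 3, 4], and
# labels = (use_row - use_row % 3 * 2) + 1 = [1, 0, 4, 3]: each anchor's
# target column is its positive (and vice versa), while the hard negative
# only appears as a distractor column in the similarity matrix.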
def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
if self.training:
bs = input_ids.size(0)
input_ids = input_ids.view(bs * 3, -1)
attention_mask = attention_mask.view(bs * 3, -1)
out = self.bert(input_ids, attention_mask)
out = self.pooler(out, attention_mask)
out = self.mlp(out)
labels = flow.arange(
out.size(0),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=out.placement,
)
use_row = self.create_use_row(labels)
labels = (use_row - use_row % 3 * 2) + 1
sim = cosine_similarity(out.unsqueeze(1), out.unsqueeze(0))
sim = (
sim
- flow.eye(
out.size(0),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=out.placement,
)
* 1e12
)
sim = flow.index_select(sim, dim=0, index=use_row)
sim = sim / 0.05
loss = nn.CrossEntropyLoss()(sim, labels)
return {"loss": loss}
else:
bs = input_ids.size(0)
input_ids = input_ids.view(bs * 2, -1)
attention_mask = attention_mask.view(bs * 2, -1)
out = self.bert(input_ids, attention_mask)
out = self.pooler(out, attention_mask)
self.mlp(out)  # note: the projection output is not assigned; the pooled embeddings are used directly below
out = out.view(bs, 2, -1)
sent1 = out[:, 0]
sent2 = out[:, 1]
sim = cosine_similarity(sent1, sent2)
sim = sim.to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
return {"sim": sim.unsqueeze(1), "labels": labels}
|
py | 1a3e76130df89e3475fd5199597aa328f889a97c | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations:
"""ApplicationGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> "models.ApplicationGateway":
"""Gets the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.ApplicationGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
application_gateway_name: str,
parameters: "models.ApplicationGateway",
**kwargs
) -> "models.ApplicationGateway":
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
application_gateway_name: str,
parameters: "models.ApplicationGateway",
**kwargs
) -> AsyncLROPoller["models.ApplicationGateway"]:
"""Creates or updates the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param parameters: Parameters supplied to the create or update application gateway operation.
:type parameters: ~azure.mgmt.network.v2018_11_01.models.ApplicationGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ApplicationGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ApplicationGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
application_gateway_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.ApplicationGateway":
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
application_gateway_name: str,
parameters: "models.TagsObject",
**kwargs
) -> AsyncLROPoller["models.ApplicationGateway"]:
"""Updates the specified application gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param parameters: Parameters supplied to update application gateway tags.
:type parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ApplicationGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ApplicationGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.ApplicationGatewayListResult"]:
"""Lists all application gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.ApplicationGatewayListResult"]:
"""Gets all the application gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'} # type: ignore
async def begin_start(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Starts the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'} # type: ignore
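    # Illustrative usage sketch (not part of the generated client; import path,
    # credential, and resource names below are assumptions/placeholders). Callers
    # normally reach begin_start/begin_stop through the async management client
    # rather than this operations class directly:
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.network.aio import NetworkManagementClient
    #
    #   async def restart_gateway(subscription_id: str) -> None:
    #       async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
    #           poller = await client.application_gateways.begin_stop("my-rg", "my-appgw")
    #           await poller.result()   # wait for the stop LRO to complete (returns None)
    #           poller = await client.application_gateways.begin_start("my-rg", "my-appgw")
    #           await poller.result()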
async def _stop_initial(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Stops the specified application gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'} # type: ignore
async def _backend_health_initial(
self,
resource_group_name: str,
application_gateway_name: str,
expand: Optional[str] = None,
**kwargs
) -> Optional["models.ApplicationGatewayBackendHealth"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ApplicationGatewayBackendHealth"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._backend_health_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_backend_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'} # type: ignore
async def begin_backend_health(
self,
resource_group_name: str,
application_gateway_name: str,
expand: Optional[str] = None,
**kwargs
) -> AsyncLROPoller["models.ApplicationGatewayBackendHealth"]:
"""Gets the backend health of the specified application gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param expand: Expands BackendAddressPool and BackendHttpSettings referenced in backend health.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ApplicationGatewayBackendHealth or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayBackendHealth]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewayBackendHealth"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._backend_health_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
expand=expand,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_backend_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'} # type: ignore
async def list_available_server_variables(
self,
**kwargs
) -> List[str]:
"""Lists all available server variables.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of str, or the result of cls(response)
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.list_available_server_variables.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[str]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_server_variables.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableServerVariables'} # type: ignore
async def list_available_request_headers(
self,
**kwargs
) -> List[str]:
"""Lists all available request headers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of str, or the result of cls(response)
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.list_available_request_headers.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[str]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_request_headers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableRequestHeaders'} # type: ignore
async def list_available_response_headers(
self,
**kwargs
) -> List[str]:
"""Lists all available response headers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of str, or the result of cls(response)
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.list_available_response_headers.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[str]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_response_headers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableResponseHeaders'} # type: ignore
async def list_available_waf_rule_sets(
self,
**kwargs
) -> "models.ApplicationGatewayAvailableWafRuleSetsResult":
"""Lists all available web application firewall rule sets.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayAvailableWafRuleSetsResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayAvailableWafRuleSetsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewayAvailableWafRuleSetsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.list_available_waf_rule_sets.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayAvailableWafRuleSetsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_waf_rule_sets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets'} # type: ignore
async def list_available_ssl_options(
self,
**kwargs
) -> "models.ApplicationGatewayAvailableSslOptions":
"""Lists available Ssl options for configuring Ssl policy.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayAvailableSslOptions, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayAvailableSslOptions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewayAvailableSslOptions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.list_available_ssl_options.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayAvailableSslOptions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_ssl_options.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default'} # type: ignore
def list_available_ssl_predefined_policies(
self,
**kwargs
) -> AsyncIterable["models.ApplicationGatewayAvailableSslPredefinedPolicies"]:
"""Lists all SSL predefined policies for configuring Ssl policy.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayAvailableSslPredefinedPolicies or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayAvailableSslPredefinedPolicies]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewayAvailableSslPredefinedPolicies"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_available_ssl_predefined_policies.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayAvailableSslPredefinedPolicies', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_available_ssl_predefined_policies.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies'} # type: ignore
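    # Illustrative usage sketch (assumes an already-constructed async client;
    # names are placeholders): the AsyncItemPaged result is consumed with
    # `async for`, so no manual page handling is needed.
    #
    #   async def print_predefined_policies(client):
    #       async for policy in client.application_gateways.list_available_ssl_predefined_policies():
    #           print(policy.name)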
async def get_ssl_predefined_policy(
self,
predefined_policy_name: str,
**kwargs
) -> "models.ApplicationGatewaySslPredefinedPolicy":
"""Gets Ssl predefined policy with the specified policy name.
:param predefined_policy_name: Name of Ssl predefined policy.
:type predefined_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewaySslPredefinedPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.ApplicationGatewaySslPredefinedPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationGatewaySslPredefinedPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get_ssl_predefined_policy.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'predefinedPolicyName': self._serialize.url("predefined_policy_name", predefined_policy_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewaySslPredefinedPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ssl_predefined_policy.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}'} # type: ignore
|
py | 1a3e76989c708cb5f16cdb0e339e7f822fa5c2da | from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['target', 'samples', 'internalformat', 'width', 'height'])
def glRenderbufferStorageMultisampleEXT(target, samples, internalformat, width, height):
pass
@params(api='gles3', prms=['target', 'attachment', 'textarget', 'texture', 'level', 'samples'])
def glFramebufferTexture2DMultisampleEXT(target, attachment, textarget, texture, level, samples):
pass
|
py | 1a3e7757347cb2dec5d4410bd7d0e259f57ce0ab | '''
Created on Feb 18, 2015
@author: jay7958
'''
import pickle
from crispy_forms.bootstrap import InlineRadios, InlineCheckboxes
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from django import forms
from django.db.models import Count
from dojo.forms import MultipleSelectWithPop
from .models import Engagement_Survey, Answered_Survey, TextAnswer, ChoiceAnswer, Choice, Question, TextQuestion, \
ChoiceQuestion
# List of validator_name:func_name
# Show in admin a multichoice list of validator names
# pass this to form using field_name='validator_name' ?
class QuestionForm(forms.Form):
''' Base class for a Question
'''
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
        # If True, crispy-forms will render the <form>..</form> tags
self.helper.form_tag = kwargs.get('form_tag', True)
if 'form_tag' in kwargs:
del kwargs['form_tag']
self.answered_survey = kwargs.get('answered_survey')
if not self.answered_survey:
            raise ValueError('Need an answered_survey to save answers to')
del kwargs['answered_survey']
        self.helper.form_class = kwargs.pop('form_class', '')  # pop so the extra kwarg is not passed on to forms.Form
self.question = kwargs.get('question')
if not self.question:
raise ValueError('Need a question to render')
del kwargs['question']
super(QuestionForm, self).__init__(*args, **kwargs)
class TextQuestionForm(QuestionForm):
def __init__(self, *args, **kwargs):
super(TextQuestionForm, self).__init__(*args, **kwargs)
# work out initial data
initial_answer = TextAnswer.objects.filter(
answered_survey=self.answered_survey,
question=self.question
)
if initial_answer.exists():
initial_answer = initial_answer[0].answer
else:
initial_answer = ''
self.fields['answer'] = forms.CharField(
label=self.question.text,
widget=forms.Textarea(),
required=not self.question.optional,
initial=initial_answer,
)
answer = self.fields['answer']
def save(self):
if not self.is_valid():
raise forms.ValidationError('form is not valid')
answer = self.cleaned_data.get('answer')
if not answer:
if self.fields['answer'].required:
                raise forms.ValidationError('Required')
return
text_answer, created = TextAnswer.objects.get_or_create(
answered_survey=self.answered_survey,
question=self.question,
)
if created:
text_answer.answered_survey = self.answered_survey
text_answer.answer = answer
text_answer.save()
class ChoiceQuestionForm(QuestionForm):
def __init__(self, *args, **kwargs):
super(ChoiceQuestionForm, self).__init__(*args, **kwargs)
choices = [(c.id, c.label) for c in self.question.choices.all()]
# initial values
initial_choices = []
choice_answer = ChoiceAnswer.objects.filter(
answered_survey=self.answered_survey,
question=self.question,
).annotate(a=Count('answer')).filter(a__gt=0)
# we have ChoiceAnswer instance
if choice_answer:
choice_answer = choice_answer[0]
initial_choices = choice_answer.answer.all().values_list('id',
flat=True)
if self.question.multichoice is False:
initial_choices = initial_choices[0]
# default classes
widget = forms.RadioSelect
field_type = forms.ChoiceField
inline_type = InlineRadios
if self.question.multichoice:
field_type = forms.MultipleChoiceField
widget = forms.CheckboxSelectMultiple
inline_type = InlineCheckboxes
field = field_type(
label=self.question.text,
required=not self.question.optional,
choices=choices,
initial=initial_choices,
widget=widget
)
self.fields['answer'] = field
# Render choice buttons inline
self.helper.layout = Layout(
inline_type('answer')
)
def clean_answer(self):
real_answer = self.cleaned_data.get('answer')
# for single choice questions, the selected answer is a single string
if type(real_answer) is not list:
real_answer = [real_answer]
return real_answer
def save(self):
if not self.is_valid():
raise forms.ValidationError('Form is not valid')
real_answer = self.cleaned_data.get('answer')
if not real_answer:
if self.fields['answer'].required:
                raise forms.ValidationError('Required')
return
choices = Choice.objects.filter(id__in=real_answer)
# find ChoiceAnswer and filter in answer !
choice_answer = ChoiceAnswer.objects.filter(
answered_survey=self.answered_survey,
question=self.question,
)
# we have ChoiceAnswer instance
if choice_answer:
choice_answer = choice_answer[0]
if not choice_answer:
# create a ChoiceAnswer
choice_answer = ChoiceAnswer.objects.create(
answered_survey=self.answered_survey,
question=self.question
)
# re save out the choices
choice_answer.answered_survey = self.answered_survey
choice_answer.answer = choices
choice_answer.save()
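# Illustrative usage sketch (hypothetical view-level code, not part of this
# module): each question of an answered survey is paired with the matching
# form class, passing the `question`/`answered_survey` kwargs that
# QuestionForm requires. The isinstance dispatch assumes the survey queryset
# yields concrete TextQuestion/ChoiceQuestion instances.
#
#   def build_question_forms(request, answered_survey):
#       question_forms = []
#       for question in answered_survey.survey.questions.all():
#           form_cls = TextQuestionForm if isinstance(question, TextQuestion) else ChoiceQuestionForm
#           question_forms.append(form_cls(request.POST or None,
#                                          question=question,
#                                          answered_survey=answered_survey,
#                                          form_tag=False))
#       return question_forms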
class Add_Survey_Form(forms.ModelForm):
survey = forms.ModelChoiceField(
queryset=Engagement_Survey.objects.all(),
required=True,
widget=forms.widgets.Select(),
help_text='Select the Survey to add.')
class Meta:
model = Answered_Survey
exclude = ('responder',
'completed',
'engagement',
'answered_on')
class Delete_Survey_Form(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Answered_Survey
exclude = ('responder',
'completed',
'engagement',
'answered_on',
'survey')
class Delete_Eng_Survey_Form(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Engagement_Survey
exclude = ('name',
'questions',
'description',
'active')
class CreateSurveyForm(forms.ModelForm):
class Meta:
model = Engagement_Survey
exclude = ['questions']
class EditSurveyQuestionsForm(forms.ModelForm):
questions = forms.ModelMultipleChoiceField(
Question.objects.all(),
required=True,
help_text="Select questions to include on this survey. Field can be used to search available questions.",
widget=MultipleSelectWithPop(attrs={'size': '11'}))
class Meta:
model = Engagement_Survey
exclude = ['name', 'description', 'active']
class CreateQuestionForm(forms.Form):
type = forms.ChoiceField(choices=(("---", "-----"), ("text", "Text"), ("choice", "Choice")))
order = forms.IntegerField(min_value=1, widget=forms.TextInput(attrs={'data-type': 'both'}))
optional = forms.BooleanField(help_text="If selected, user doesn't have to answer this question",
initial=False,
required=False,
widget=forms.CheckboxInput(attrs={'data-type': 'both'}))
class CreateTextQuestionForm(forms.Form):
text = forms.CharField(widget=forms.Textarea(attrs={'data-type': 'text'}),
label="Question Text",
help_text="The actual question.")
class Meta:
model = TextQuestion
exclude = ['order', 'optional']
class MultiWidgetBasic(forms.widgets.MultiWidget):
def __init__(self, attrs=None):
widgets = [forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'})]
super(MultiWidgetBasic, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return pickle.loads(value)
else:
return [None, None, None, None, None, None]
def format_output(self, rendered_widgets):
return '<br/>'.join(rendered_widgets)
class MultiExampleField(forms.fields.MultiValueField):
widget = MultiWidgetBasic
def __init__(self, *args, **kwargs):
list_fields = [forms.fields.CharField(required=True),
forms.fields.CharField(required=True),
forms.fields.CharField(required=False),
forms.fields.CharField(required=False),
forms.fields.CharField(required=False),
forms.fields.CharField(required=False)]
super(MultiExampleField, self).__init__(list_fields, *args, **kwargs)
def compress(self, values):
return pickle.dumps(values)
class CreateChoiceQuestionForm(forms.Form):
c_text = forms.CharField(widget=forms.Textarea(attrs={'data-type': 'choice'}),
label="Question Text",
help_text="The actual question.")
multichoice = forms.BooleanField(required=False,
initial=False,
widget=forms.CheckboxInput(attrs={'data-type': 'choice'}),
help_text="Can more than one choice can be selected?")
answer_choices = MultiExampleField(required=False, widget=MultiWidgetBasic(attrs={'data-type': 'choice'}))
class Meta:
model = ChoiceQuestion
exclude = ['order', 'optional', 'choices']
class EditQuestionForm(forms.ModelForm):
class Meta:
model = Question
exclude = []
class EditTextQuestionForm(EditQuestionForm):
class Meta:
model = TextQuestion
exclude = []
class EditChoiceQuestionForm(EditQuestionForm):
choices = forms.ModelMultipleChoiceField(
Choice.objects.all(),
required=True,
help_text="Select choices to include on this question. Field can be used to search available choices.",
widget=MultipleSelectWithPop(attrs={'size': '11'}))
class Meta:
model = ChoiceQuestion
exclude = []
class AddChoicesForm(forms.ModelForm):
class Meta:
model = Choice
exclude = []
|
py | 1a3e785076e77eed76bb23b024a04985cc8fd041 | import pandas as pd
import pytest
import torch
from deepdow.benchmarks import OneOverN
from deepdow.callbacks import Callback
from deepdow.experiments import History, Run
from deepdow.losses import MeanReturns, StandardDeviation
from deepdow.nn import DummyNet
def test_basic():
n_channels = 2
x = torch.rand(10, n_channels, 4, 5)
network = DummyNet(n_channels=n_channels)
y = network(x)
print(y)
def test_history():
history = History()
history.add_entry(model='whatever', epoch=1)
history.add_entry(model='whatever_2', epoch=1, value=3)
history.add_entry(model='1111', epoch=2)
metrics_1 = history.metrics_per_epoch(1)
metrics_2 = history.metrics_per_epoch(2)
metrics_all = history.metrics
assert isinstance(metrics_1, pd.DataFrame)
assert isinstance(metrics_2, pd.DataFrame)
assert isinstance(metrics_all, pd.DataFrame)
assert len(metrics_1) == 2
assert len(metrics_2) == 1
assert len(metrics_all) == 3
with pytest.raises(KeyError):
history.metrics_per_epoch(3)
history.pretty_print(epoch=1)
history.pretty_print(epoch=None)
class TestRun:
def test_wrong_construction_1(self, dataloader_dummy):
"""Wrong positional arguments."""
with pytest.raises(TypeError):
Run('this_is_fake', MeanReturns(), dataloader_dummy)
with pytest.raises(TypeError):
Run(DummyNet(), 'this_is_fake', dataloader_dummy)
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), 'this_is_fake')
def test_wrong_construction_2(self, dataloader_dummy):
"""Wrong keyword arguments."""
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics='this_is_fake')
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'a': 'this_is_fake'})
with pytest.raises(ValueError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'loss': MeanReturns()})
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders='this_is_fake')
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders={'val': 'this_is_fake'})
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks='this_is_fake')
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'uniform': 'this_is_fake'})
with pytest.raises(ValueError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'main': OneOverN()})
@pytest.mark.parametrize('additional_kwargs', [True, False])
def test_attributes_after_construction(self, dataloader_dummy, additional_kwargs):
network = DummyNet()
loss = MeanReturns()
kwargs = {}
if additional_kwargs:
kwargs.update({'metrics': {'std': StandardDeviation()},
'val_dataloaders': {'val': dataloader_dummy},
'benchmarks': {'whatever': OneOverN()}})
run = Run(network, loss, dataloader_dummy, **kwargs)
assert network is run.network
assert loss is run.loss
assert dataloader_dummy is run.train_dataloader
assert isinstance(run.metrics, dict)
assert isinstance(run.val_dataloaders, dict)
assert isinstance(run.hparams, dict)
def test_launch(self, dataloader_dummy):
network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
loss = MeanReturns()
run = Run(network, loss, dataloader_dummy)
run.launch(n_epochs=1)
def test_launch_interrupt(self, dataloader_dummy, monkeypatch):
network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
loss = MeanReturns()
class TempCallback(Callback):
def on_train_begin(self, metadata):
raise KeyboardInterrupt()
monkeypatch.setattr('time.sleep', lambda x: None)
run = Run(network, loss, dataloader_dummy, callbacks=[TempCallback()])
run.launch(n_epochs=1)
|
py | 1a3e787ef3de03dfb66fcf83c6b14b66dd0b53e3 | import re
from pyspark import SparkConf, SparkContext
def normalizeWords(text):
return re.compile(r'\W+', re.UNICODE).split(text.lower())
conf = SparkConf().setMaster("local").setAppName("WordCount")
sc = SparkContext(conf = conf)
input = sc.textFile("file:///sparkcourse/book.txt")
words = input.flatMap(normalizeWords)
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
wordCountsSorted = wordCounts.map(lambda x: (x[1], x[0])).sortByKey()
results = wordCountsSorted.collect()
for result in results:
count = str(result[0])
word = result[1].encode('ascii', 'ignore')
if (word):
print(word.decode() + ":\t\t" + count)
|
py | 1a3e795ebb8e7e56e9dfc06a36a58730d3a45af0 | """This module contains the general information for SmartcallhomeProfile ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class SmartcallhomeProfileConsts():
FORMAT_FULL_TXT = "fullTxt"
FORMAT_SHORT_TXT = "shortTxt"
FORMAT_XML = "xml"
LEVEL_CRITICAL = "critical"
LEVEL_DEBUG = "debug"
LEVEL_DISASTER = "disaster"
LEVEL_FATAL = "fatal"
LEVEL_MAJOR = "major"
LEVEL_MINOR = "minor"
LEVEL_NORMAL = "normal"
LEVEL_NOTIFICATION = "notification"
LEVEL_WARNING = "warning"
class SmartcallhomeProfile(ManagedObject):
"""This is SmartcallhomeProfile class."""
consts = SmartcallhomeProfileConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("SmartcallhomeProfile", "smartcallhomeProfile", "smart-profile-[name]", VersionMeta.Version141a, "InputOutput", 0x3ff, [], ["admin", "operations"], [u'callhomeEp'], [], ["Get"])
prop_meta = {
"alert_groups": MoPropertyMeta("alert_groups", "alertGroups", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x2, None, None, r"""((defaultValue|unknown|diagnostic|environmental|inventory|all),){0,5}(defaultValue|unknown|diagnostic|environmental|inventory|all){0,1}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"format": MoPropertyMeta("format", "format", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["fullTxt", "shortTxt", "xml"], []),
"level": MoPropertyMeta("level", "level", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["critical", "debug", "disaster", "fatal", "major", "minor", "normal", "notification", "warning"], []),
"max_size": MoPropertyMeta("max_size", "maxSize", "uint", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-5000000"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version141a, MoPropertyMeta.NAMING, 0x80, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x100, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"alertGroups": "alert_groups",
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"format": "format",
"level": "level",
"maxSize": "max_size",
"name": "name",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.alert_groups = None
self.child_action = None
self.descr = None
self.format = None
self.level = None
self.max_size = None
self.status = None
ManagedObject.__init__(self, "SmartcallhomeProfile", parent_mo_or_dn, **kwargs)
|
py | 1a3e79621f86648f94538d27a543a2036f5338b7 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for converting parsed doc content into markdown pages.
The adjacent `parser` module creates `PageInfo` objects, containing all data
necessary to document an element of the TensorFlow API.
This module contains one public function, which handles the conversion of these
`PageInfo` objects into a markdown string:
md_page = build_md_page(page_info)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
def build_md_page(page_info):
"""Given a PageInfo object, return markdown for the page.
Args:
page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
`parser.ModulePageInfo`
Returns:
Markdown for the page
Raises:
ValueError: if `page_info` is an instance of an unrecognized class
"""
if page_info.for_function():
return _build_function_page(page_info)
if page_info.for_class():
return _build_class_page(page_info)
if page_info.for_module():
return _build_module_page(page_info)
raise ValueError('Unknown Page Info Type: %s' % type(page_info))
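# Illustrative usage sketch (the parser entry point and path variables are
# assumptions; see the adjacent `parser` module for the real API):
#
#   page_info = parser.docs_for_object(full_name, py_object, parser_config)
#   md_page = build_md_page(page_info)
#   with open(output_path, 'w') as f:
#     f.write(md_page)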
def _build_function_page(page_info):
"""Given a FunctionPageInfo object Return the page as an md string."""
parts = ['# %s\n\n' % page_info.full_name]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.signature is not None:
parts.append(_build_signature(page_info))
if page_info.defined_in:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
return ''.join(parts)
def _build_class_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
parts.append('## Class `%s`\n\n' % page_info.full_name.split('.')[-1])
if page_info.bases:
parts.append('Inherits From: ')
link_template = '[`{short_name}`]({url})'
parts.append(', '.join(
link_template.format(**base._asdict()) for base in page_info.bases))
parts.append('\n\n')
# Sort the methods list, but make sure constructors come first.
constructor_names = ['__init__', '__new__']
constructors = sorted(
method for method in page_info.methods
if method.short_name in constructor_names)
other_methods = sorted(
method for method in page_info.methods
if method.short_name not in constructor_names)
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Class `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
parts.append('\n\n')
if constructors:
for method_info in constructors:
parts.append(_build_method_section(method_info, heading_level=2))
parts.append('\n\n')
if page_info.classes:
parts.append('## Child Classes\n')
link_template = ('[`class {class_info.short_name}`]'
'({class_info.url})\n\n')
class_links = sorted(
link_template.format(class_info=class_info)
for class_info in page_info.classes)
parts.extend(class_links)
if page_info.properties:
parts.append('## Properties\n\n')
for prop_info in page_info.properties:
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
parts.append(h3.format(short_name=prop_info.short_name))
parts.append(prop_info.doc.docstring)
parts.append(_build_function_details(prop_info.doc.function_details))
parts.append(_build_compatibility(prop_info.doc.compatibility))
parts.append('\n\n')
parts.append('\n\n')
if other_methods:
parts.append('## Methods\n\n')
for method_info in other_methods:
parts.append(_build_method_section(method_info))
parts.append('\n\n')
if page_info.other_members:
parts.append('## Class Members\n\n')
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
others_member_headings = (h3.format(short_name=info.short_name)
for info in sorted(page_info.other_members))
parts.extend(others_member_headings)
return ''.join(parts)
def _build_method_section(method_info, heading_level=3):
"""Generates a markdown section for a method.
Args:
method_info: A `MethodInfo` object.
heading_level: An Int, which HTML heading level to use.
Returns:
A markdown string.
"""
parts = []
heading = ('<h{heading_level} id="{short_name}">'
'<code>{short_name}</code>'
'</h{heading_level}>\n\n')
parts.append(heading.format(heading_level=heading_level,
**method_info._asdict()))
if method_info.signature is not None:
parts.append(_build_signature(method_info, use_full_name=False))
parts.append(method_info.doc.docstring)
parts.append(_build_function_details(method_info.doc.function_details))
parts.append(_build_compatibility(method_info.doc.compatibility))
parts.append('\n\n')
return ''.join(parts)
def _build_module_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Module `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.doc.docstring)
parts.append(_build_compatibility(page_info.doc.compatibility))
parts.append('\n\n')
if page_info.modules:
parts.append('## Modules\n\n')
template = '[`{short_name}`]({url}) module'
for item in page_info.modules:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.classes:
parts.append('## Classes\n\n')
template = '[`class {short_name}`]({url})'
for item in page_info.classes:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.functions:
parts.append('## Functions\n\n')
template = '[`{short_name}(...)`]({url})'
for item in page_info.functions:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.other_members:
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
parts.append('## Other Members\n\n')
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
for item in page_info.other_members:
parts.append(h3.format(**item._asdict()))
return ''.join(parts)
def _build_signature(obj_info, use_full_name=True):
"""Returns a md code block showing the function signature."""
# Special case tf.range, since it has an optional first argument
if obj_info.full_name == 'tf.range':
return (
'``` python\n'
"tf.range(limit, delta=1, dtype=None, name='range')\n"
"tf.range(start, limit, delta=1, dtype=None, name='range')\n"
'```\n\n')
parts = ['``` python']
parts.extend(['@' + dec for dec in obj_info.decorators])
signature_template = '{name}({sig})'
if not obj_info.signature:
sig = ''
elif len(obj_info.signature) == 1:
sig = obj_info.signature[0]
else:
sig = ',\n'.join(' %s' % sig_item for sig_item in obj_info.signature)
sig = '\n'+sig+'\n'
if use_full_name:
obj_name = obj_info.full_name
else:
obj_name = obj_info.short_name
parts.append(signature_template.format(name=obj_name, sig=sig))
parts.append('```\n\n')
return '\n'.join(parts)
def _build_compatibility(compatibility):
"""Return the compatibility section as an md string."""
parts = []
sorted_keys = sorted(compatibility.keys())
for key in sorted_keys:
value = compatibility[key]
# Dedent so that it does not trigger markdown code formatting.
value = textwrap.dedent(value)
parts.append('\n\n#### %s Compatibility\n%s\n' % (key.title(), value))
return ''.join(parts)
def _build_function_details(function_details):
"""Return the function details section as an md string."""
parts = []
for detail in function_details:
sub = []
sub.append('#### ' + detail.keyword + ':\n\n')
sub.append(textwrap.dedent(detail.header))
for key, value in detail.items:
sub.append('* <b>`%s`</b>: %s' % (key, value))
parts.append(''.join(sub))
return '\n'.join(parts)
|
py | 1a3e7ab6d00f9eb5f10e98b78e2c14e50f006962 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import sys
from datetime import datetime
from functools import partial
import time
_socket = __import__("socket")
# workaround on osx, disable kqueue
if sys.platform == "darwin":
os.environ['EVENT_NOKQUEUE'] = "1"
try:
import gevent
except ImportError:
raise RuntimeError("You need gevent installed to use this worker.")
from gevent.pool import Pool
from gevent.server import StreamServer
from gevent.socket import wait_write, socket
from gevent import pywsgi
import gunicorn
from gunicorn.workers.async import AsyncWorker
from gunicorn.http.wsgi import sendfile as o_sendfile
VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)
def _gevent_sendfile(fdout, fdin, offset, nbytes):
while True:
try:
return o_sendfile(fdout, fdin, offset, nbytes)
except OSError as e:
if e.args[0] == errno.EAGAIN:
wait_write(fdout)
else:
raise
def patch_sendfile():
from gunicorn.http import wsgi
if o_sendfile is not None:
setattr(wsgi, "sendfile", _gevent_sendfile)
BASE_WSGI_ENV = {
'GATEWAY_INTERFACE': 'CGI/1.1',
'SERVER_SOFTWARE': VERSION,
'SCRIPT_NAME': '',
'wsgi.version': (1, 0),
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False
}
class GeventWorker(AsyncWorker):
server_class = None
wsgi_handler = None
def patch(self):
from gevent import monkey
monkey.noisy = False
# if the new version is used make sure to patch subprocess
if gevent.version_info[0] == 0:
monkey.patch_all()
else:
monkey.patch_all(subprocess=True)
# monkey patch sendfile to make it none blocking
patch_sendfile()
# patch sockets
sockets = []
for s in self.sockets:
if sys.version_info[0] == 3:
sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM,
fileno=s.sock.fileno()))
else:
sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM,
_sock=s))
self.sockets = sockets
def notify(self):
super(GeventWorker, self).notify()
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s", self)
sys.exit(0)
def timeout_ctx(self):
return gevent.Timeout(self.cfg.keepalive, False)
def run(self):
servers = []
ssl_args = {}
if self.cfg.is_ssl:
ssl_args = dict(server_side=True, **self.cfg.ssl_options)
for s in self.sockets:
s.setblocking(1)
pool = Pool(self.worker_connections)
if self.server_class is not None:
server = self.server_class(
s, application=self.wsgi, spawn=pool, log=self.log,
handler_class=self.wsgi_handler, **ssl_args)
else:
hfun = partial(self.handle, s)
server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)
server.start()
servers.append(server)
try:
while self.alive:
self.notify()
gevent.sleep(1.0)
except KeyboardInterrupt:
pass
except:
for server in servers:
try:
server.stop()
except:
pass
raise
try:
# Stop accepting requests
for server in servers:
if hasattr(server, 'close'): # gevent 1.0
server.close()
if hasattr(server, 'kill'): # gevent < 1.0
server.kill()
# Handle current requests until graceful_timeout
ts = time.time()
while time.time() - ts <= self.cfg.graceful_timeout:
accepting = 0
for server in servers:
if server.pool.free_count() != server.pool.size:
accepting += 1
# if no server is accepting a connection, we can exit
if not accepting:
return
self.notify()
gevent.sleep(1.0)
# Force kill all active the handlers
self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
[server.stop(timeout=1) for server in servers]
except:
pass
def handle_request(self, *args):
try:
super(GeventWorker, self).handle_request(*args)
except gevent.GreenletExit:
pass
except SystemExit:
pass
def handle_quit(self, sig, frame):
# Move this out of the signal handler so we can use
# blocking calls. See #1126
gevent.spawn(super(GeventWorker, self).handle_quit, sig, frame)
if gevent.version_info[0] == 0:
def init_process(self):
# monkey patch here
self.patch()
# reinit the hub
import gevent.core
gevent.core.reinit()
#gevent 0.13 and older doesn't reinitialize dns for us after forking
#here's the workaround
gevent.core.dns_shutdown(fail_requests=1)
gevent.core.dns_init()
super(GeventWorker, self).init_process()
else:
def init_process(self):
# monkey patch here
self.patch()
# reinit the hub
from gevent import hub
hub.reinit()
# then initialize the process
super(GeventWorker, self).init_process()
class GeventResponse(object):
status = None
headers = None
sent = None
def __init__(self, status, headers, clength):
self.status = status
self.headers = headers
self.sent = clength
class PyWSGIHandler(pywsgi.WSGIHandler):
def log_request(self):
start = datetime.fromtimestamp(self.time_start)
finish = datetime.fromtimestamp(self.time_finish)
response_time = finish - start
resp_headers = getattr(self, 'response_headers', {})
resp = GeventResponse(self.status, resp_headers, self.response_length)
if hasattr(self, 'headers'):
req_headers = [h.split(":", 1) for h in self.headers.headers]
else:
req_headers = []
self.server.log.access(resp, req_headers, self.environ, response_time)
def get_environ(self):
env = super(PyWSGIHandler, self).get_environ()
env['gunicorn.sock'] = self.socket
env['RAW_URI'] = self.path
return env
class PyWSGIServer(pywsgi.WSGIServer):
base_env = BASE_WSGI_ENV
class GeventPyWSGIWorker(GeventWorker):
"The Gevent StreamServer based workers."
server_class = PyWSGIServer
wsgi_handler = PyWSGIHandler
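    # Illustrative usage sketch: these workers are normally selected through
    # gunicorn's worker-class setting rather than instantiated directly, e.g.
    #
    #   gunicorn -k gevent myapp:app
    #   gunicorn -k gunicorn.workers.ggevent.GeventPyWSGIWorker myapp:app
    #
    # ("myapp:app" is a placeholder WSGI entry point; the dotted path assumes
    # this module lives at gunicorn/workers/ggevent.py.)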
|
py | 1a3e7ac29784dde71bd681188034e6006f1c1c6b | import socket
import time
import pychromecast
from gtts import gTTS
def get_speaker(ip_addr=None, name=None):
if ip_addr:
return pychromecast.Chromecast(str(ip_addr))
speakers = pychromecast.get_chromecasts()
if len(speakers) == 0:
print("No devices are found")
raise Exception
if name:
return next(s for s in speakers if s.device.friendly_name == name)
    return next(iter(speakers))  # speakers is a plain list, so wrap it in an iterator first
def speak(text, speaker, lang="en"):
try:
tts = gTTS(text=text, lang=lang)
urls = tts.get_urls()
if not speaker.is_idle:
print("Killing current running app")
speaker.quit_app()
time.sleep(5)
speaker.wait()
        speaker.media_controller.play_media(urls[0], "audio/mp3")
speaker.media_controller.block_until_active()
except Exception as error:
print(str(error))
raise Exception
def check_speaker(speaker, lang):
try:
speak(text="OK", speaker=speaker, lang=lang)
print("You are ready to speak!")
return True
except Exception as error:
print("Try an another ip or name: %s" % (str(error)))
return False
def prepare_speaker():
print("Enter language (English: en or Japanese: ja): ", end="")
lang = input()
print("Enter Google Home name or IP: ", end="")
name_or_ip = input()
try:
socket.inet_aton(name_or_ip)
speaker = get_speaker(ip_addr=name_or_ip)
except socket.error:
speaker = get_speaker(name=name_or_ip)
except Exception as error:
print("Error: %s" % (str(error)))
raise Exception
return speaker, lang
def main():
while True:
try:
speaker, lang = prepare_speaker()
except Exception:
continue
if check_speaker(speaker, lang):
break
print("Failed to setup. Try again!")
print("Start typing ...")
text = ""
while text != "bye":
print(">> ", end="")
text = input()
if text:
speak(text, speaker, lang)
if __name__ == "__main__":
main()
|
py | 1a3e7b6bdfeaa6742e78c8a9a652e947ad43c42b | from .request_util import *
from .throttle import Throttle
|
py | 1a3e7ba6c4d6898f4a65aa2081b3d939f00516cc | #python
import lx
import modo
scene = modo.Scene()
lx.eval('layer.new')
lx.eval('query sceneservice mesh.id ? first');
#-----------------------------------------
#-----------------------------------------
lx.eval('tool.set prim.cube on');
lx.eval('tool.reset prim.cube');
#zero its position
lx.eval('tool.setAttr prim.cube cenX 0.0');
lx.eval('tool.setAttr prim.cube cenY 0.0');
lx.eval('tool.setAttr prim.cube cenZ 0.0');
#Size
lx.eval('tool.setAttr prim.cube sizeX 0.5');
lx.eval('tool.setAttr prim.cube sizeY 8.0');
lx.eval('tool.setAttr prim.cube sizeZ 0.2');
#give it segments
#lx.eval('tool.setAttr prim.cube sides 24');
lx.eval('tool.attr prim.cube segmentsZ 4');
lx.eval('tool.attr prim.cube segmentsX 4');
lx.eval('tool.attr prim.cube segmentsY 48');
lx.eval('tool.setAttr prim.cube axis y');
lx.eval('tool.setAttr prim.cube uvs true');
lx.eval('tool.apply');
lx.eval('tool.set prim.cube off 0');
lx.eval('poly.convert face subpatch true');
#drop tool
lx.eval('tool.clearTask snap');
lx.eval('select.drop item');
|
py | 1a3e7bb621d836985689f78b7c45dcc5a7fe4926 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetBucketObjectResult:
"""
A collection of values returned by getBucketObject.
"""
def __init__(__self__, body=None, bucket=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_length=None, content_type=None, etag=None, expiration=None, expires=None, id=None, key=None, last_modified=None, metadata=None, object_lock_legal_hold_status=None, object_lock_mode=None, object_lock_retain_until_date=None, range=None, server_side_encryption=None, sse_kms_key_id=None, storage_class=None, tags=None, version_id=None, website_redirect_location=None):
if body and not isinstance(body, str):
raise TypeError("Expected argument 'body' to be a str")
__self__.body = body
"""
Object data (see **limitations above** to understand cases in which this field is actually available)
"""
if bucket and not isinstance(bucket, str):
raise TypeError("Expected argument 'bucket' to be a str")
__self__.bucket = bucket
if cache_control and not isinstance(cache_control, str):
raise TypeError("Expected argument 'cache_control' to be a str")
__self__.cache_control = cache_control
"""
Specifies caching behavior along the request/reply chain.
"""
if content_disposition and not isinstance(content_disposition, str):
raise TypeError("Expected argument 'content_disposition' to be a str")
__self__.content_disposition = content_disposition
"""
Specifies presentational information for the object.
"""
if content_encoding and not isinstance(content_encoding, str):
raise TypeError("Expected argument 'content_encoding' to be a str")
__self__.content_encoding = content_encoding
"""
Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
"""
if content_language and not isinstance(content_language, str):
raise TypeError("Expected argument 'content_language' to be a str")
__self__.content_language = content_language
"""
The language the content is in.
"""
if content_length and not isinstance(content_length, float):
raise TypeError("Expected argument 'content_length' to be a float")
__self__.content_length = content_length
"""
Size of the body in bytes.
"""
if content_type and not isinstance(content_type, str):
raise TypeError("Expected argument 'content_type' to be a str")
__self__.content_type = content_type
"""
A standard MIME type describing the format of the object data.
"""
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
__self__.etag = etag
"""
[ETag](https://en.wikipedia.org/wiki/HTTP_ETag) generated for the object (an MD5 sum of the object content in case it's not encrypted)
"""
if expiration and not isinstance(expiration, str):
raise TypeError("Expected argument 'expiration' to be a str")
__self__.expiration = expiration
"""
If the object expiration is configured (see [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)), the field includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.
"""
if expires and not isinstance(expires, str):
raise TypeError("Expected argument 'expires' to be a str")
__self__.expires = expires
"""
The date and time at which the object is no longer cacheable.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
The provider-assigned unique ID for this managed resource.
"""
if key and not isinstance(key, str):
raise TypeError("Expected argument 'key' to be a str")
__self__.key = key
if last_modified and not isinstance(last_modified, str):
raise TypeError("Expected argument 'last_modified' to be a str")
__self__.last_modified = last_modified
"""
Last modified date of the object in RFC1123 format (e.g. `Mon, 02 Jan 2006 15:04:05 MST`)
"""
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
__self__.metadata = metadata
"""
A map of metadata stored with the object in S3
"""
if object_lock_legal_hold_status and not isinstance(object_lock_legal_hold_status, str):
raise TypeError("Expected argument 'object_lock_legal_hold_status' to be a str")
__self__.object_lock_legal_hold_status = object_lock_legal_hold_status
"""
Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). This field is only returned if you have permission to view an object's legal hold status.
"""
if object_lock_mode and not isinstance(object_lock_mode, str):
raise TypeError("Expected argument 'object_lock_mode' to be a str")
__self__.object_lock_mode = object_lock_mode
"""
The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) currently in place for this object.
"""
if object_lock_retain_until_date and not isinstance(object_lock_retain_until_date, str):
raise TypeError("Expected argument 'object_lock_retain_until_date' to be a str")
__self__.object_lock_retain_until_date = object_lock_retain_until_date
"""
The date and time when this object's object lock will expire.
"""
if range and not isinstance(range, str):
raise TypeError("Expected argument 'range' to be a str")
__self__.range = range
if server_side_encryption and not isinstance(server_side_encryption, str):
raise TypeError("Expected argument 'server_side_encryption' to be a str")
__self__.server_side_encryption = server_side_encryption
"""
If the object is stored using server-side encryption (KMS or Amazon S3-managed encryption key), this field includes the chosen encryption and algorithm used.
"""
if sse_kms_key_id and not isinstance(sse_kms_key_id, str):
raise TypeError("Expected argument 'sse_kms_key_id' to be a str")
__self__.sse_kms_key_id = sse_kms_key_id
"""
If present, specifies the ID of the Key Management Service (KMS) master encryption key that was used for the object.
"""
if storage_class and not isinstance(storage_class, str):
raise TypeError("Expected argument 'storage_class' to be a str")
__self__.storage_class = storage_class
"""
[Storage class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) information of the object. Available for all objects except for `Standard` storage class objects.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
A map of tags assigned to the object.
"""
if version_id and not isinstance(version_id, str):
raise TypeError("Expected argument 'version_id' to be a str")
__self__.version_id = version_id
"""
The latest version ID of the object returned.
"""
if website_redirect_location and not isinstance(website_redirect_location, str):
raise TypeError("Expected argument 'website_redirect_location' to be a str")
__self__.website_redirect_location = website_redirect_location
"""
If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.
"""
class AwaitableGetBucketObjectResult(GetBucketObjectResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBucketObjectResult(
body=self.body,
bucket=self.bucket,
cache_control=self.cache_control,
content_disposition=self.content_disposition,
content_encoding=self.content_encoding,
content_language=self.content_language,
content_length=self.content_length,
content_type=self.content_type,
etag=self.etag,
expiration=self.expiration,
expires=self.expires,
id=self.id,
key=self.key,
last_modified=self.last_modified,
metadata=self.metadata,
object_lock_legal_hold_status=self.object_lock_legal_hold_status,
object_lock_mode=self.object_lock_mode,
object_lock_retain_until_date=self.object_lock_retain_until_date,
range=self.range,
server_side_encryption=self.server_side_encryption,
sse_kms_key_id=self.sse_kms_key_id,
storage_class=self.storage_class,
tags=self.tags,
version_id=self.version_id,
website_redirect_location=self.website_redirect_location)
def get_bucket_object(bucket=None,key=None,range=None,tags=None,version_id=None,opts=None):
"""
The S3 object data source allows access to the metadata and
_optionally_ (see below) content of an object stored inside S3 bucket.
> **Note:** The content of an object (`body` field) is available only for objects which have a human-readable `Content-Type` (`text/*` and `application/json`). This is to prevent printing unsafe characters and potentially downloading large amount of data which would be thrown away in favour of metadata.
## Example Usage
The following example retrieves a text object (which must have a `Content-Type`
value starting with `text/`) and uses it as the `user_data` for an EC2 instance:
```python
import pulumi
import pulumi_aws as aws
bootstrap_script = aws.s3.get_bucket_object(bucket="ourcorp-deploy-config",
key="ec2-bootstrap-script.sh")
example = aws.ec2.Instance("example",
ami="ami-2757f631",
instance_type="t2.micro",
user_data=bootstrap_script.body)
```
The following, more-complex example retrieves only the metadata for a zip
file stored in S3, which is then used to pass the most recent `version_id`
to AWS Lambda for use as a function implementation. More information about
Lambda functions is available in the documentation for
`lambda.Function`.
```python
import pulumi
import pulumi_aws as aws
lambda_ = aws.s3.get_bucket_object(bucket="ourcorp-lambda-functions",
key="hello-world.zip")
test_lambda = aws.lambda_.Function("testLambda",
handler="exports.test",
role=aws_iam_role["iam_for_lambda"]["arn"],
s3_bucket=lambda_.bucket,
s3_key=lambda_.key,
s3_object_version=lambda_.version_id)
```
:param str bucket: The name of the bucket to read the object from. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified
:param str key: The full path to the object inside the bucket
:param dict tags: A map of tags assigned to the object.
:param str version_id: Specific version ID of the object returned (defaults to latest version)
"""
__args__ = dict()
__args__['bucket'] = bucket
__args__['key'] = key
__args__['range'] = range
__args__['tags'] = tags
__args__['versionId'] = version_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:s3/getBucketObject:getBucketObject', __args__, opts=opts).value
return AwaitableGetBucketObjectResult(
body=__ret__.get('body'),
bucket=__ret__.get('bucket'),
cache_control=__ret__.get('cacheControl'),
content_disposition=__ret__.get('contentDisposition'),
content_encoding=__ret__.get('contentEncoding'),
content_language=__ret__.get('contentLanguage'),
content_length=__ret__.get('contentLength'),
content_type=__ret__.get('contentType'),
etag=__ret__.get('etag'),
expiration=__ret__.get('expiration'),
expires=__ret__.get('expires'),
id=__ret__.get('id'),
key=__ret__.get('key'),
last_modified=__ret__.get('lastModified'),
metadata=__ret__.get('metadata'),
object_lock_legal_hold_status=__ret__.get('objectLockLegalHoldStatus'),
object_lock_mode=__ret__.get('objectLockMode'),
object_lock_retain_until_date=__ret__.get('objectLockRetainUntilDate'),
range=__ret__.get('range'),
server_side_encryption=__ret__.get('serverSideEncryption'),
sse_kms_key_id=__ret__.get('sseKmsKeyId'),
storage_class=__ret__.get('storageClass'),
tags=__ret__.get('tags'),
version_id=__ret__.get('versionId'),
website_redirect_location=__ret__.get('websiteRedirectLocation'))
|
py | 1a3e7c6a6da59e319c72e43b8e342bea8d80c2e3 | # model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='open-mmlab://msra/hrnetv2_w32',
backbone=dict(
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256)))),
neck=dict(type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 19])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_hrnetv2p_w32'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
py | 1a3e7d0cf46c1d3ea5659f23107565ec3ea144b5 | # Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Provide data iterator for CIFAR10 examples.
'''
from contextlib import contextmanager
import numpy as np
import struct
import tarfile
import zlib
import time
import os
import errno
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download, get_data_home
class Cifar10DataSource(DataSource):
'''
    Get data directly from the CIFAR-10 dataset on the Internet (cs.toronto.edu).
'''
def _get_data(self, position):
image = self._images[self._indexes[position]]
label = self._labels[self._indexes[position]]
return (image, label)
def __init__(self, train=True, shuffle=False, rng=None):
super(Cifar10DataSource, self).__init__(shuffle=shuffle, rng=rng)
self._train = train
data_uri = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
logger.info('Getting labeled data from {}.'.format(data_uri))
r = download(data_uri) # file object returned
with tarfile.open(fileobj=r, mode="r:gz") as fpin:
# Training data
if train:
images = []
labels = []
for member in fpin.getmembers():
if "data_batch" not in member.name:
continue
fp = fpin.extractfile(member)
data = np.load(fp, encoding="bytes", allow_pickle=True)
images.append(data[b"data"])
labels.append(data[b"labels"])
self._size = 50000
self._images = np.concatenate(
images).reshape(self._size, 3, 32, 32)
self._labels = np.concatenate(labels).reshape(-1, 1)
# Validation data
else:
for member in fpin.getmembers():
if "test_batch" not in member.name:
continue
fp = fpin.extractfile(member)
data = np.load(fp, encoding="bytes", allow_pickle=True)
images = data[b"data"]
labels = data[b"labels"]
self._size = 10000
self._images = images.reshape(self._size, 3, 32, 32)
self._labels = np.array(labels).reshape(-1, 1)
r.close()
logger.info('Getting labeled data from {}.'.format(data_uri))
self._size = self._labels.size
self._variables = ('x', 'y')
if rng is None:
rng = np.random.RandomState(313)
self.rng = rng
self.reset()
def reset(self):
if self._shuffle:
self._indexes = self.rng.permutation(self._size)
else:
self._indexes = np.arange(self._size)
super(Cifar10DataSource, self).reset()
@property
def images(self):
"""Get copy of whole data with a shape of (N, 1, H, W)."""
return self._images.copy()
@property
def labels(self):
"""Get copy of whole label with a shape of (N, 1)."""
return self._labels.copy()
def data_iterator_cifar10(batch_size,
train=True,
rng=None,
shuffle=True,
with_memory_cache=False,
with_file_cache=False):
'''
Provide DataIterator with :py:class:`Cifar10DataSource`
    The default value of both the with_memory_cache and with_file_cache options is False,
    because :py:class:`Cifar10DataSource` is able to store all data in memory.
'''
return data_iterator(Cifar10DataSource(train=train, shuffle=shuffle, rng=rng),
batch_size,
rng,
with_memory_cache,
with_file_cache)
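# Illustrative usage sketch (the batch size and variable names below are
# assumptions for the example, not part of the original module):
#
#   di = data_iterator_cifar10(batch_size=64, train=True, shuffle=True)
#   images, labels = di.next()  # images: (64, 3, 32, 32), labels: (64, 1)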
|
py | 1a3e7e28864716b0f76036b26bd8c1286f677cad | import sys
DIGIT_MAP = {
'zero': '0',
'um': '1',
'dois': '2',
'três': '3',
'quatro': '4',
'cinco': '5',
'seis': '6',
'sete': '7',
'oito': '8',
'nove': '9',
}
'''
first way to handle exceptions
def converter(s):
try:
number = ''
for token in s:
number += DIGIT_MAP[token]
return int(number)
except (KeyError, TypeError):
        pass  # alternatively, return -1
'''
# second way to handle exceptions
def converter(s):
try:
number = ''
for token in s:
number += DIGIT_MAP[token]
return int(number)
except (KeyError, TypeError) as e:
print(f "Conversion error: {e!r}",
file=sys.stderr)
return -1 |
py | 1a3e7ee1ee16727dae7eb217f1e43bfa80670912 | import pandas as pd
from ..utils.messages import msg_warning, msg_info
def _drop(df: pd.DataFrame, *cols) -> pd.DataFrame:
try:
index = df.columns.values
for col in cols:
if col not in index:
msg_warning("Column", col, "not found. Aborting")
return
df = df.drop(col, axis=1)
except Exception as e:
raise ("Can not drop column", e)
return df
def _rename(df: pd.DataFrame, source_col: str, dest_col: str) -> pd.DataFrame:
try:
df = df.rename(columns={source_col: dest_col})
except Exception as e:
raise ("Can not rename column", e)
msg_info("Column", source_col, "renamed")
return df
|
py | 1a3e7f3d76ebaf80295d10133b5cb78e8979ef3b |
from testutils import assert_raises
import struct
data = struct.pack('IH', 14, 12)
assert data == bytes([14, 0, 0, 0, 12, 0])
v1, v2 = struct.unpack('IH', data)
assert v1 == 14
assert v2 == 12
data = struct.pack('<IH', 14, 12)
assert data == bytes([14, 0, 0, 0, 12, 0])
v1, v2 = struct.unpack('<IH', data)
assert v1 == 14
assert v2 == 12
data = struct.pack('>IH', 14, 12)
assert data == bytes([0, 0, 0, 14, 0, 12])
v1, v2 = struct.unpack('>IH', data)
assert v1 == 14
assert v2 == 12
data = struct.pack('3B', 65, 66, 67)
assert data == bytes([65, 66, 67])
v1, v2, v3 = struct.unpack('3B', data)
assert v1 == 65
assert v2 == 66
assert v3 == 67
with assert_raises(Exception):
data = struct.pack('B0B', 65, 66)
with assert_raises(Exception):
data = struct.pack('B2B', 65, 66)
data = struct.pack('B1B', 65, 66)
with assert_raises(Exception):
struct.pack('<IH', "14", 12)
assert struct.calcsize("B") == 1
assert struct.calcsize("<L4B") == 8
assert struct.Struct('3B').pack(65, 66, 67) == bytes([65, 66, 67])
class Indexable(object):
def __init__(self, value):
self._value = value
def __index__(self):
return self._value
data = struct.pack('B', Indexable(65))
assert data == bytes([65])
data = struct.pack('5s', b"test1")
assert data == b"test1"
data = struct.pack('3s', b"test2")
assert data == b"tes"
data = struct.pack('7s', b"test3")
assert data == b"test3\0\0"
data = struct.pack('?', True)
assert data == b'\1'
data = struct.pack('?', [])
assert data == b'\0' |
py | 1a3e8127a639f34451e551e2b121f2651da4f27c | """
Summarize results for the train/valid/test splits.
# PROGRAM : metrics.py
# PURPOSE  : compute model metrics on the test dataset
# AUTHOR : Caio Eadi Stringari
# EMAIL : [email protected]
# V1.0 : 05/05/2020 [Caio Stringari]
"""
import argparse
import numpy as np
import tensorflow as tf
import pandas as pd
import pathlib
try:
import efficientnet.tfkeras as efn
except Exception:
print(ImportError("\nWarning: run pip install -U --pre efficientnet"))
from tensorflow.keras.preprocessing.image import ImageDataGenerator
if __name__ == '__main__':
print("\nClassifiying wave breaking data, please wait...\n")
# Argument parser
parser = argparse.ArgumentParser()
# input model and history
parser.add_argument("--model", "-M",
nargs=1,
action="store",
dest="model",
required=True,
help="Input model in .h5 format.",)
parser.add_argument("--history", "-hist",
nargs=1,
action="store",
dest="history",
required=True,
help="Input model history in csv format.",)
# input test data
parser.add_argument("--data", "-data",
nargs=1,
action="store",
dest="data",
required=True,
help="Input path with image data.",)
parser.add_argument("--threshold", "-trx",
nargs=1,
action="store",
dest="TRX",
default=[0.5],
required=False,
help="Probability threshold for classification.")
parser.add_argument("--epoch", "-epch",
nargs=1,
action="store",
dest="epoch",
default=[-1],
required=False,
help="Which epoch to use. Default is last epoch.")
# output data
parser.add_argument("--output", "-o",
nargs=1,
action="store",
dest="output",
required=True,
help="Output file.",)
args = parser.parse_args()
# --- test data input ---
test_dir = args.data[0]
test_dir = pathlib.Path(test_dir)
image_count = len(list(test_dir.glob('*/*')))
epoch = int(args.epoch[0])
BATCH_SIZE = int(image_count/10)
class_names = np.array([item.name for item in test_dir.glob('*')])
try:
nclasses = len(class_names)
print(" Found image data, proceeding.\n")
print(" - Classes are {}".format(class_names))
except Exception:
raise IOError("Check your data!")
# --- pre-trained model ---
model = tf.keras.models.load_model(args.model[0])
history = pd.read_csv(args.history[0])
# train data
accuracy = history.iloc[epoch]["Binary_Accuracy"]
tp = history.iloc[epoch]["True_Positives"]
fp = history.iloc[epoch]["False_Positives"]
tn = history.iloc[epoch]["True_Negatives"]
fn = history.iloc[epoch]["False_Negatives"]
precision = history.iloc[epoch]["Precision"]
recall = history.iloc[epoch]["Recall"]
auc = history.iloc[epoch]["AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_train = pd.DataFrame([X], columns=cols)
df_train.index = ["Train"]
print(df_train)
# validation data
accuracy = history.iloc[epoch]["val_Binary_Accuracy"]
tp = history.iloc[epoch]["val_True_Positives"]
fp = history.iloc[epoch]["val_False_Positives"]
tn = history.iloc[epoch]["val_True_Negatives"]
fn = history.iloc[epoch]["val_False_Negatives"]
precision = history.iloc[epoch]["val_Precision"]
recall = history.iloc[epoch]["val_Recall"]
auc = history.iloc[epoch]["val_AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_val = pd.DataFrame([X], columns=cols)
df_val.index = ["Validation"]
print(df_val)
# evaluate the model on test data
inp_shape = model.input_shape
img_height = inp_shape[1] # image height for all images
img_width = inp_shape[2] # image width for all images
datagen = ImageDataGenerator(rescale=1./255.)
print("\n Fitting the teset data generator:\n")
data_gen_test = datagen.flow_from_directory(
directory=str(test_dir), batch_size=BATCH_SIZE, shuffle=False,
target_size=(img_height, img_width), classes=["0", "1"],
class_mode="binary")
result = model.evaluate(data_gen_test)
metrics = dict(zip(model.metrics_names, result))
    # test data
accuracy = metrics["Binary_Accuracy"]
tp = metrics["True_Positives"]
fp = metrics["False_Positives"]
tn = metrics["True_Negatives"]
fn = metrics["False_Negatives"]
precision = metrics["Precision"]
recall = metrics["Recall"]
auc = metrics["AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_test = pd.DataFrame([X], columns=cols)
df_test.index = ["Test"]
# merge results
df = pd.concat([df_train, df_val, df_test])
print(df)
df.to_excel(args.output[0], float_format="%.3f", index=True)
print("\nMy work is done!\n")
|
py | 1a3e8130537c18f4465c3559be08e5ff13ab1049 | """Eclect.us view"""
__docformat__ = "numpy"
from gamestonk_terminal.stocks.fundamental_analysis import eclect_us_model
from gamestonk_terminal.rich_config import console
def display_analysis(
ticker: str,
) -> None:
"""Display analysis of SEC filings based on NLP model. [Source: https://eclect.us]
Parameters
----------
ticker: str
        Ticker to run the SEC filings analysis on
"""
analysis = eclect_us_model.get_filings_analysis(ticker)
if analysis:
console.print(analysis)
else:
console.print("Filings not found from eclect.us")
console.print("")
|
py | 1a3e81b9da940d3099ecea54cde721f80fa28ef8 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
import pytest
import uamqp
from uamqp import authentication, errors, c_uamqp
from azure.eventhub import (
EventData,
EventHubSharedKeyCredential,
EventHubProducerClient,
EventHubConsumerClient
)
from azure.eventhub.exceptions import OperationTimeoutError
@pytest.mark.liveTest
def test_send_with_long_interval_sync(live_eventhub, sleep):
sender = EventHubProducerClient(live_eventhub['hostname'], live_eventhub['event_hub'],
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']))
with sender:
batch = sender.create_batch()
batch.add(EventData(b"A single event"))
sender.send_batch(batch)
for _ in range(1):
if sleep:
time.sleep(300)
else:
sender._producers[-1]._handler._connection._conn.destroy()
batch = sender.create_batch()
batch.add(EventData(b"A single event"))
sender.send_batch(batch)
partition_ids = sender.get_partition_ids()
received = []
for p in partition_ids:
uri = "sb://{}/{}".format(live_eventhub['hostname'], live_eventhub['event_hub'])
sas_auth = authentication.SASTokenAuth.from_shared_access_key(
uri, live_eventhub['key_name'], live_eventhub['access_key'])
source = "amqps://{}/{}/ConsumerGroups/{}/Partitions/{}".format(
live_eventhub['hostname'],
live_eventhub['event_hub'],
live_eventhub['consumer_group'],
p)
receiver = uamqp.ReceiveClient(source, auth=sas_auth, debug=False, timeout=5000, prefetch=500)
try:
receiver.open()
received.extend([EventData._from_message(x) for x in receiver.receive_message_batch(timeout=5000)])
finally:
receiver.close()
assert len(received) == 2
assert list(received[0].body)[0] == b"A single event"
@pytest.mark.liveTest
def test_send_connection_idle_timeout_and_reconnect_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubProducerClient.from_connection_string(conn_str=connection_str, idle_timeout=10)
with client:
ed = EventData('data')
sender = client._create_producer(partition_id='0')
with sender:
sender._open_with_retry()
time.sleep(11)
sender._unsent_events = [ed.message]
ed.message.on_send_complete = sender._on_outcome
with pytest.raises((uamqp.errors.ConnectionClose,
uamqp.errors.MessageHandlerError, OperationTimeoutError)):
# Mac may raise OperationTimeoutError or MessageHandlerError
sender._send_event_data()
sender._send_event_data_with_retry()
messages = receivers[0].receive_message_batch(max_batch_size=10, timeout=10000)
received_ed1 = EventData._from_message(messages[0])
assert received_ed1.body_as_str() == 'data'
@pytest.mark.liveTest
def test_receive_connection_idle_timeout_and_reconnect_sync(connstr_senders):
connection_str, senders = connstr_senders
client = EventHubConsumerClient.from_connection_string(
conn_str=connection_str,
consumer_group='$default',
idle_timeout=10
)
def on_event_received(event):
on_event_received.event = event
with client:
consumer = client._create_consumer("$default", "0", "-1", on_event_received)
with consumer:
consumer._open()
time.sleep(11)
ed = EventData("Event")
senders[0].send(ed)
consumer._handler.do_work()
assert consumer._handler._connection._state == c_uamqp.ConnectionState.DISCARDING
duration = 10
now_time = time.time()
end_time = now_time + duration
while now_time < end_time:
consumer.receive()
time.sleep(0.01)
now_time = time.time()
assert on_event_received.event.body_as_str() == "Event"
|
py | 1a3e82117dc5e5b93b9288b87d144c0b2164b27f | import discord
from discord.ext import commands
from random import randint
class Bottlespin:
"""Spins a bottle and lands on a random user."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
async def spin(self, ctx, role):
"""Spin the bottle"""
roles = ctx.message.server.roles
if "@" in role:
await self.bot.say("Please do noy use @ infront of the role. Thank you")
return
rolename = [role.name for role in roles]
rolename = str(rolename).lower()
role = role.lower()
author = ctx.message.author
server = ctx.message.server
if len(server.members) < 2:
await self.bot.say("`Not enough people are around to spin the bottle`")
return
if role in rolename:
roleexist = True
else:
await self.bot.say("`{} is not a exising role`".format(role))
return
if roleexist:
target = [m for m in server.members if m != author and role in [
s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
else:
target = [m for m in server.members if m != author and str(
m.status) == "online" or str(m.status) == "idle"]
if not target:
if role:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
else:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
return
else:
target = target[randint(0, len(target)-1)]
await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))
def setup(bot):
n = Bottlespin(bot)
bot.add_cog(n)
|
py | 1a3e8340ff7474ce62e72664f31460dcf7e45715 | """ Periodic maintenance tasks """
import time
import typing
class Maintenance:
""" Container for periodic maintenance tasks """
def __init__(self, app):
self.app = app
self.tasks: typing.Dict[typing.Callable[[], None],
typing.Dict[str, float]] = {}
def register(self, func: typing.Callable[[], None], interval: float):
""" Registers a task to run periodically """
self.tasks[func] = {'interval': interval}
def run(self, force: bool = False):
""" Run all pending tasks; 'force' will run all tasks whether they're
pending or not. """
with self.app.app_context():
now = time.time()
for func, spec in self.tasks.items():
if force or now >= spec.get('next_run', 0):
func()
spec['next_run'] = now + spec['interval']
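# Illustrative usage sketch (the Flask-style `app` object and the task function
# below are assumptions for the example, not part of this module):
#
#   maint = Maintenance(app)
#   maint.register(purge_expired_sessions, interval=3600)  # run roughly hourly
#   maint.run()            # runs only tasks whose next_run time has passed
#   maint.run(force=True)  # runs every registered task immediately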
|
py | 1a3e838a92ab23f454f16e5dfa7d4ce587812cb2 | class Solution:
    def fib(self, N: int) -> int:
        # simple recursive Fibonacci; fills in the otherwise empty method body
        return N if N < 2 else self.fib(N - 1) + self.fib(N - 2)
|
py | 1a3e83d7aaaa4ee1d5c9a95961205dae7f0542e4 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class DjangoDarajaConfig(AppConfig):
name = 'django_daraja'
|
py | 1a3e846fd5b48bcc33f14c5244eb820698350183 | # As suggested by joni, this is doable by using the scipy.optimize.minimize library. You could define a function residual as follows:
def residual(x):
# calculate/define Q1 = g1(x)
# calculate/define Q2 = g2(x)
res = Q1 + Q2
return res
# This function then can easily be minimized using a constrained algorithm from scipy.optimize.minimize:
import numpy as np
from scipy.optimize import minimize
x0 = 1 # just for example
res = minimize(residual, x0, method='trust-constr', constraints=your_constraints)
# The constraint P1 + P2 = target must be defined and passed to the constraints argument. Use scipy's LinearConstraint or NonlinearConstraint class depending on whether your constraint is linear or non-linear.
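# A minimal sketch of how that constraint could be written (illustrative only:
# it assumes the decision vector is x = [P1, P2] and that `target` is known):
#
#   from scipy.optimize import LinearConstraint
#   target = 10.0  # assumed value for the example
#   your_constraints = LinearConstraint([[1.0, 1.0]], lb=target, ub=target)  # enforces P1 + P2 == target
#   x0 = np.array([target / 2, target / 2])  # 2-element starting guess to match [P1, P2]
#   res = minimize(residual, x0, method='trust-constr', constraints=your_constraints)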
|
py | 1a3e84d2d187b1151ed7f02a5433f3330773231e | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.compute.common.clients.ping import PingClient
from cloudroast.compute.fixtures import ComputeFixture
class ConfigDriveFilesTest(ComputeFixture):
@classmethod
def setUpClass(cls):
"""
Perform actions that setup the necessary resources for testing
The following resources are created during this set up:
- A keypair with a random name starting with 'key'
- A server with the following settings:
- config_drive set to True
- The keypair previously created
- Remaining values required for creating a server will come
from test configuration.
"""
super(ConfigDriveFilesTest, cls).setUpClass()
cls.key = cls.keypairs_client.create_keypair(rand_name("key")).entity
cls.resources.add(cls.key.name,
cls.keypairs_client.delete_keypair)
cls.server = cls.server_behaviors.create_active_server(
config_drive=True,
key_name=cls.key.name).entity
cls.resources.add(cls.server.id,
cls.servers_client.delete_server)
cls.config_drive_behaviors.mount_config_drive(
server=cls.server, servers_config=cls.servers_config,
key=cls.key.private_key,
source_path=cls.config_drive_config.mount_source_path,
destination_path=cls.config_drive_config.base_path_to_mount)
cls.vendor_meta = cls.config_drive_behaviors.get_vendor_metadata(
cls.server, cls.servers_config, key=cls.key.private_key,
filepath=cls.config_drive_config.vendor_meta_filepath)
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_dns_services(self):
"""
Verify Services of vendor networking metadata on config drive
Validate that there is at least one network information service in the
vendor metadata. Attempt to ping every service IP address in the network
information service(s). Validate that none of the ping attempts failed.
The following assertions occur:
- The number of network information services on the server is
greater than or equal to 1
- The list of failed ping attempts is empty.
"""
self.assertGreaterEqual(len(self.vendor_meta.network_info.services), 1,
msg='Expected config drive to have at least 1'
' network dns service configured')
service_ips = [service.address for service in
self.vendor_meta.network_info.services]
failed_pings = []
for service_ip in service_ips:
try:
PingClient.ping_until_reachable(
service_ip, timeout=60, interval_time=5)
except:
failed_pings.append(service_ip)
self.assertFalse(failed_pings, msg="Unable to reach the following "
"IP addresses: {0}".format(failed_pings))
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_networks(self):
"""
Vendor networking metadata should match the server's addresses
Validate that every IP address on the server is found in the network
information in the vendor metadata for the server created during test
set up.
The following assertions occur:
- The list of ips that are found on the server but not found in the
vendor metadata networks information is empty.
"""
expected_addresses = []
addresses = self.server.addresses
for name, ip_addresses in self.expected_networks.iteritems():
network = addresses.get_by_name(name)
if ip_addresses.get('v4'):
expected_addresses.append(network.ipv4)
if ip_addresses.get('v6'):
expected_addresses.append(network.ipv6)
config_drive_instance_ips = [network.ip_address for network in
self.vendor_meta.network_info.networks]
missing_ips = [ip for ip in expected_addresses if ip not in
config_drive_instance_ips]
self.assertFalse(missing_ips, msg="Missing IPs found: {0}".format(
missing_ips))
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_file_links_structure(self):
"""
Verify File structure of vendor metadata on config drive
Validate that there is at least one network information link. Validate
that the last link in the list has values for the attributes 'mtu',
'id', or 'vif_id'.
The following assertions occur:
- The number of network information links on the server is
greater than or equal to 1
- The last link in the list of links in vendor metadata has values
for the attributes 'mtu', 'id', and 'vif_id'
"""
self.assertGreaterEqual(len(self.vendor_meta.network_info.links), 1,
msg='Expected config drive to have at least 1'
' hardware link configured')
for link in self.vendor_meta.network_info.links:
bad_attrs = [attr for attr in ['mtu', 'id', 'vif_id']
if getattr(link, attr, None) is None]
self.assertFalse(bad_attrs, msg="{0} not set in response".format(
" ".join(bad_attrs)))
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_file_network_structure(self):
"""
Verify File structure of vendor metadata on config drive
Validate that the last network in the network list from the network
information in vendor metadata on the server created during test set up
has values for the 'type', 'netmask', 'link', 'routes', 'id'.
The following assertions occur:
- The last network in the network information in the vendor metadata
has values for the attributes 'type', 'netmask', 'link', 'routes',
and 'id'
"""
for network in self.vendor_meta.network_info.networks:
bad_attrs = [attr for attr in ['type',
'netmask',
'link',
'routes',
'id']
if getattr(network, attr, None) is None]
self.assertFalse(bad_attrs, msg="{0} not set in response".format(
" ".join(bad_attrs)))
def test_config_drive_vendor_metadata_ip_whitelist(self):
"""
The vendor metadata in config drive should have an ip whitelist
Validate that there is a value for the IP whitelist in the vendor
metadata.
The following assertions occur:
- The ip whitelist vendor metadata is not None
"""
self.assertIsNotNone(self.vendor_meta.ip_whitelist,
msg="ip_whitelist was not set in the response")
|
py | 1a3e87561983f7765a1dbec9e46648a77cb8caa0 | """
Everyone should spend more time with crag pups.
"""
import time
|
py | 1a3e87fa97234b49761f89331341b9c34e68bad2 | import collections.abc
import copy
import inspect
import warnings
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
)
import prefect
import prefect.engine.cache_validators
from prefect.engine.results import ResultHandlerResult
import prefect.engine.signals
import prefect.triggers
from prefect.utilities import logging
from prefect.utilities.notifications import callback_factory
from prefect.utilities.tasks import unmapped
if TYPE_CHECKING:
from prefect.core.flow import Flow # pylint: disable=W0611
from prefect.engine.result import Result # pylint: disable=W0611
from prefect.engine.result_handlers import ResultHandler # pylint: disable=W0611
from prefect.engine.state import State # pylint: disable=W0611
from prefect.core import Edge # pylint: disable=W0611
VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
def _validate_run_signature(run: Callable) -> None:
func = getattr(run, "__wrapped__", run)
try:
run_sig = inspect.getfullargspec(func)
except TypeError as exc:
if str(exc) == "unsupported callable":
raise ValueError(
"This function can not be inspected (this is common "
"with `builtin` and `numpy` functions). In order to "
"use it as a task, please wrap it in a standard "
"Python function. For more detail, see "
"https://docs.prefect.io/core/advanced_tutorials/task-guide.html#the-task-decorator"
)
raise
if run_sig.varargs:
raise ValueError(
"Tasks with variable positional arguments (*args) are not "
"supported, because all Prefect arguments are stored as "
"keywords. As a workaround, consider modifying the run() "
"method to accept **kwargs and feeding the values "
"to *args."
)
reserved_kwargs = ["upstream_tasks", "mapped", "task_args", "flow"]
violations = [kw for kw in reserved_kwargs if kw in run_sig.args]
if violations:
msg = "Tasks cannot have the following argument names: {}.".format(
", ".join(violations)
)
msg += " These are reserved keyword arguments."
raise ValueError(msg)
class SignatureValidator(type):
def __new__(cls, name: str, parents: tuple, methods: dict) -> "SignatureValidator":
run = methods.get("run", lambda: None)
_validate_run_signature(run)
# necessary to ensure classes that inherit from parent class
# also get passed through __new__
return type.__new__(cls, name, parents, methods) # type: ignore
class Task(metaclass=SignatureValidator):
"""
The Task class which is used as the full representation of a unit of work.
This Task class can be used directly as a first class object where it must
be inherited from by a class that implements the `run` method. For a more
functional way of generating Tasks, see [the task decorator](../utilities/tasks.html).
Inheritance example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
```
*Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
the following keywords are reserved: `upstream_tasks`, `task_args` and `mapped`.
An instance of a `Task` can be used functionally to generate other task instances
with the same attributes but with different values bound to their `run` methods.
Example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
a = AddTask()
with Flow("My Flow") as f:
t1 = a(1, 2) # t1 != a
t2 = a(5, 7) # t2 != a
```
To bind values to a Task's run method imperatively (and without making a copy), see `Task.bind`.
Args:
- name (str, optional): The name of this task
- slug (str, optional): The slug for this task. Slugs provide a stable ID for tasks so that
the Prefect API can identify task run states. If a slug is not provided, one will be generated
automatically once the task is added to a Flow.
- tags ([str], optional): A list of tags for this task
- max_retries (int, optional): The maximum amount of times this task can be retried
- retry_delay (timedelta, optional): The amount of time to wait until task is retried
- timeout (int, optional): The amount of time (in seconds) to wait while
running this task before a timeout occurs; note that sub-second
resolution is not supported
- trigger (callable, optional): a function that determines whether the
task should run, based on the states of any upstream tasks.
- skip_on_upstream_skip (bool, optional): if `True`, if any immediately
upstream tasks are skipped, this task will automatically be skipped as
well, regardless of trigger. By default, this prevents tasks from
attempting to use either state or data from tasks that didn't run. If
`False`, the task's trigger will be called as normal, with skips
considered successes. Defaults to `True`.
- cache_for (timedelta, optional, DEPRECATED): The amount of time to maintain a cache
of the outputs of this task. Useful for situations where the containing Flow
will be rerun multiple times, but this task doesn't need to be.
- cache_validator (Callable, optional, DEPRECATED): Validator that will determine
whether the cache for this task is still valid (only required if `cache_for`
is provided; defaults to `prefect.engine.cache_validators.duration_only`)
- cache_key (str, optional, DEPRECATED): if provided, a `cache_key`
serves as a unique identifier for this Task's cache, and can be shared
across both Tasks _and_ Flows; if not provided, the Task's _name_ will
be used if running locally, or the Task's database ID if running in
Cloud
- checkpoint (bool, optional): if this Task is successful, whether to
store its result using the `result_handler` available during the run;
Also note that checkpointing will only occur locally if
`prefect.config.flows.checkpointing` is set to `True`
- result_handler (ResultHandler, optional, DEPRECATED): the handler to
use for retrieving and storing state results during execution; if not
provided, will default to the one attached to the Flow
- result (Result, optional): the result instance used to retrieve and
store task results during execution
- target (Union[str, Callable], optional): location to check for task Result. If a result
exists at that location then the task run will enter a cached state.
`target` strings can be templated formatting strings which will be
formatted at runtime with values from `prefect.context`. If a callable function
is provided, it should have signature `callable(**kwargs) -> str` and at write
time all formatting kwargs will be passed and a fully formatted location is
expected as the return value. Can be used for string formatting logic that
`.format(**kwargs)` doesn't support
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the task changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the task instance, the old (prior) state, and the new
(current) state, with the following signature:
`state_handler(task: Task, old_state: State, new_state: State) -> Optional[State]`
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
- on_failure (Callable, optional): A function with signature
`fn(task: Task, state: State) -> None` that will be called anytime this
Task enters a failure state
- log_stdout (bool, optional): Toggle whether or not to send stdout messages to
the Prefect logger. Defaults to `False`.
Raises:
- TypeError: if `tags` is of type `str`
- TypeError: if `timeout` is not of type `int`
"""
# Tasks are not iterable, though they do have a __getitem__ method
__iter__ = None
def __init__(
self,
name: str = None,
slug: str = None,
tags: Iterable[str] = None,
max_retries: int = None,
retry_delay: timedelta = None,
timeout: int = None,
trigger: Callable[[Dict["Edge", "State"]], bool] = None,
skip_on_upstream_skip: bool = True,
cache_for: timedelta = None,
cache_validator: Callable = None,
cache_key: str = None,
checkpoint: bool = None,
result_handler: "ResultHandler" = None,
state_handlers: List[Callable] = None,
on_failure: Callable = None,
log_stdout: bool = False,
result: "Result" = None,
target: str = None,
):
self.name = name or type(self).__name__
self.slug = slug
self.logger = logging.get_logger(self.name)
# avoid silently iterating over a string
if isinstance(tags, str):
raise TypeError("Tags should be a set of tags, not a string.")
current_tags = set(prefect.context.get("tags", set()))
self.tags = (set(tags) if tags is not None else set()) | current_tags
max_retries = (
max_retries
if max_retries is not None
else prefect.config.tasks.defaults.max_retries
)
retry_delay = (
retry_delay
if retry_delay is not None
else prefect.config.tasks.defaults.retry_delay
)
timeout = (
timeout if timeout is not None else prefect.config.tasks.defaults.timeout
)
if max_retries > 0 and retry_delay is None:
raise ValueError(
"A datetime.timedelta `retry_delay` must be provided if max_retries > 0"
)
        # use `not max_retries` here (rather than an explicit None check) because the default is 0, which is falsy
if retry_delay is not None and not max_retries:
raise ValueError(
"A `max_retries` argument greater than 0 must be provided if specifying a retry delay."
)
if timeout is not None and not isinstance(timeout, int):
raise TypeError(
"Only integer timeouts (representing seconds) are supported."
)
self.max_retries = max_retries
self.retry_delay = retry_delay
self.timeout = timeout
self.trigger = trigger or prefect.triggers.all_successful
self.skip_on_upstream_skip = skip_on_upstream_skip
if cache_for is None and (
cache_validator is not None
and cache_validator is not prefect.engine.cache_validators.never_use
):
warnings.warn(
"cache_validator provided without specifying cache expiration (cache_for); this Task will not be cached."
)
self.cache_for = cache_for
self.cache_key = cache_key
default_validator = (
prefect.engine.cache_validators.never_use
if cache_for is None
else prefect.engine.cache_validators.duration_only
)
self.cache_validator = cache_validator or default_validator
self.checkpoint = checkpoint
if result_handler:
warnings.warn(
"Result Handlers are deprecated; please use the new style Result classes instead."
)
self.result = ResultHandlerResult.from_result_handler(
result_handler
) # type: Optional[Result]
else:
self.result = result
self.target = target
# if both a target and a result were provided, update the result location
# to point at the target
if self.target and self.result:
if (
getattr(self.result, "location", None)
and self.result.location != self.target
):
warnings.warn(
"Both `result.location` and `target` were provided. "
"The `target` value will be used."
)
self.result = self.result.copy()
self.result.location = self.target
if state_handlers and not isinstance(state_handlers, collections.abc.Sequence):
raise TypeError("state_handlers should be iterable.")
self.state_handlers = state_handlers or []
if on_failure is not None:
self.state_handlers.append(
callback_factory(on_failure, check=lambda s: s.is_failed())
)
self.auto_generated = False
self.log_stdout = log_stdout
# if new task creations are being tracked, add this task
# this makes it possible to give guidance to users that forget
# to add tasks to a flow
if "_unused_task_tracker" in prefect.context:
if not isinstance(self, prefect.tasks.core.constants.Constant):
prefect.context._unused_task_tracker.add(self)
def __repr__(self) -> str:
return "<Task: {self.name}>".format(self=self)
# reimplement __hash__ because we override __eq__
def __hash__(self) -> int:
return id(self)
# Run --------------------------------------------------------------------
def run(self) -> None:
"""
The `run()` method is called (with arguments, if appropriate) to run a task.
*Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
the following keywords are reserved: `upstream_tasks`, `task_args` and `mapped`.
If a task has arguments in its `run()` method, these can be bound either by using the functional
API and _calling_ the task instance, or by using `self.bind` directly.
In addition to running arbitrary functions, tasks can interact with Prefect in a few ways:
<ul><li> Return an optional result. When this function runs successfully,
the task is considered successful and the result (if any) can be
made available to downstream tasks. </li>
<li> Raise an error. Errors are interpreted as failure. </li>
<li> Raise a [signal](../engine/signals.html). Signals can include `FAIL`, `SUCCESS`, `RETRY`, `SKIP`, etc.
and indicate that the task should be put in the indicated state.
<ul>
<li> `FAIL` will lead to retries if appropriate </li>
<li> `SUCCESS` will cause the task to be marked successful </li>
<li> `RETRY` will cause the task to be marked for retry, even if `max_retries`
has been exceeded </li>
<li> `SKIP` will skip the task and possibly propagate the skip state through the
flow, depending on whether downstream tasks have `skip_on_upstream_skip=True`. </li></ul>
</li></ul>
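A minimal illustrative sketch of a custom task (the class name `AddTask`
and its arguments are hypothetical, not part of Prefect):

```
from prefect import Task
from prefect.engine import signals

class AddTask(Task):
    def run(self, x: int, y: int = 1) -> int:
        if x is None:
            raise signals.SKIP("No input data provided.")
        return x + y
```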
"""
# Dependencies -------------------------------------------------------------
def copy(self, **task_args: Any) -> "Task":
"""
Creates and returns a copy of the current Task.
Args:
- **task_args (dict, optional): a dictionary of task attribute keyword arguments, these attributes
will be set on the new copy
Raises:
- AttributeError: if any passed `task_args` are not attributes of the original
Returns:
- Task: a copy of the current Task, with any attributes updated from `task_args`
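An illustrative sketch (assumes `my_task` is an existing Task instance):

```
renamed = my_task.copy(name="my-task-copy")
```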
"""
flow = prefect.context.get("flow", None)
if (
flow
and self in flow.tasks
and (flow.edges_to(self) or flow.edges_from(self))
):
warnings.warn(
"You are making a copy of a task that has dependencies on or to other tasks "
"in the active flow context. The copy will not retain those dependencies."
)
new = copy.copy(self)
if new.slug and "slug" not in task_args:
task_args["slug"] = new.slug + "-copy"
# check task_args
for attr, val in task_args.items():
if not hasattr(new, attr):
raise AttributeError(
"{0} does not have {1} as an attribute".format(self, attr)
)
else:
setattr(new, attr, val)
# if both a target and a result were provided, update the result location
# to point at the target
if new.target and new.result:
if (
getattr(new.result, "location", None)
and new.result.location != new.target
):
warnings.warn(
"Both `result.location` and `target` were provided. "
"The `target` value will be used."
)
new.result = new.result.copy()
new.result.location = new.target
new.tags = copy.deepcopy(self.tags).union(set(new.tags))
tags = set(prefect.context.get("tags", set()))
new.tags.update(tags)
# if new task creations are being tracked, add this task
# this makes it possible to give guidance to users that forget
# to add tasks to a flow. We also remove the original task,
# as it has been "interacted" with and don't want spurious
# warnings
if "_unused_task_tracker" in prefect.context:
if self in prefect.context._unused_task_tracker:
prefect.context._unused_task_tracker.remove(self)
if not isinstance(new, prefect.tasks.core.constants.Constant):
prefect.context._unused_task_tracker.add(new)
return new
@property
def __signature__(self) -> inspect.Signature:
"""Dynamically generate the signature, replacing ``*args``/``**kwargs``
with parameters from ``run``"""
if not hasattr(self, "_cached_signature"):
sig = inspect.Signature.from_callable(self.run)
parameters = list(sig.parameters.values())
parameters.extend(EXTRA_CALL_PARAMETERS)
self._cached_signature = inspect.Signature(
parameters=parameters, return_annotation="Task"
)
return self._cached_signature
def __call__(
self,
*args: Any,
mapped: bool = False,
task_args: dict = None,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Calling a Task instance will first create a _copy_ of the instance, and then
bind any passed `args` / `kwargs` to the run method of the copy. This new task
is then returned.
Args:
- *args: arguments to bind to the new Task's `run` method
- **kwargs: keyword arguments to bind to the new Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.tasks.unmapped`
container will _not_ be mapped over.
- task_args (dict, optional): a dictionary of task attribute keyword arguments, these attributes
will be set on the new copy
- upstream_tasks ([Task], optional): a list of upstream dependencies
for the new task. This kwarg can be used to functionally specify
dependencies without binding their result to `run()`
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
Returns:
- Task: a new Task instance
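An illustrative sketch of the functional API (`add` is a hypothetical task
instance and the flow name is arbitrary):

```
from prefect import Flow

with Flow("example") as flow:
    total = add(1, 2, task_args={"name": "add-numbers"})
```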
"""
new = self.copy(**(task_args or {}))
new.bind(
*args, mapped=mapped, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
return new
def bind(
self,
*args: Any,
mapped: bool = False,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Binding a task to (keyword) arguments creates a _keyed_ edge in the active Flow
that will pass data from the arguments (whether Tasks or constants) to the
Task's `run` method under the appropriate key. Once a Task is bound in this
manner, the same task instance cannot be bound a second time in the same Flow.
To bind arguments to a _copy_ of this Task instance, see `__call__`.
Additionally, non-keyed edges can be created by passing any upstream
dependencies through `upstream_tasks`.
Args:
- *args: arguments to bind to the current Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.tasks.unmapped`
container will _not_ be mapped over.
- upstream_tasks ([Task], optional): a list of upstream dependencies for the
current task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- **kwargs: keyword arguments to bind to the current Task's `run` method
Returns:
- Task: the current Task instance
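An illustrative sketch (assumes `load`, `clean` and `validate` are Task
instances and that `load.run` accepts a `data` keyword argument):

```
from prefect import Flow

with Flow("etl") as flow:
    load.bind(data=clean, upstream_tasks=[validate])
```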
"""
# this will raise an error if callargs weren't all provided
signature = inspect.signature(self.run)
callargs = dict(signature.bind(*args, **kwargs).arguments) # type: Dict
# bind() compresses all variable keyword arguments under the ** argument name,
# so we expand them explicitly
var_kw_arg = next(
(p for p in signature.parameters.values() if p.kind == VAR_KEYWORD), None
)
if var_kw_arg:
callargs.update(callargs.pop(var_kw_arg.name, {}))
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError("Could not infer an active Flow context.")
self.set_dependencies(
flow=flow,
upstream_tasks=upstream_tasks,
keyword_tasks=callargs,
mapped=mapped,
)
tags = set(prefect.context.get("tags", set()))
self.tags.update(tags)
return self
def map(
self,
*args: Any,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
task_args: dict = None,
**kwargs: Any
) -> "Task":
"""
Map the Task elementwise across one or more Tasks. Arguments that should _not_ be mapped over
should be placed in the `prefect.utilities.tasks.unmapped` container.
For example:
```
task.map(x=X, y=unmapped(Y))
```
will map over the values of `X`, but not over the values of `Y`
Args:
- *args: arguments to map over, which will elementwise be bound to the Task's `run` method
- upstream_tasks ([Task], optional): a list of upstream dependencies
to map over
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- task_args (dict, optional): a dictionary of task attribute keyword arguments,
these attributes will be set on the new copy
- **kwargs: keyword arguments to map over, which will elementwise be bound to the Task's `run` method
Raises:
- AttributeError: if any passed `task_args` are not attributes of the original
Returns:
- Task: a new Task instance
"""
for arg in args:
if not hasattr(arg, "__getitem__") and not isinstance(arg, unmapped):
raise TypeError(
"Cannot map over unsubscriptable object of type {t}: {preview}...".format(
t=type(arg), preview=repr(arg)[:10]
)
)
new = self.copy(**(task_args or {}))
return new.bind(
*args, mapped=True, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
def set_dependencies(
self,
flow: "Flow" = None,
upstream_tasks: Iterable[object] = None,
downstream_tasks: Iterable[object] = None,
keyword_tasks: Mapping[str, object] = None,
mapped: bool = False,
validate: bool = None,
) -> None:
"""
Set dependencies for a flow either specified or in the current context using this task
Args:
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- upstream_tasks ([object], optional): A list of upstream tasks for this task
- downstream_tasks ([object], optional): A list of downstream tasks for this task
- keyword_tasks ({str: object}, optional): The results of these tasks will be provided
to this task under the specified keyword arguments.
- mapped (bool, optional): Whether the results of the _upstream_ tasks should be mapped over
with the specified keyword arguments
- validate (bool, optional): Whether or not to check the validity of the flow. If not
provided, defaults to the value of `eager_edge_validation` in your Prefect
configuration file.
Returns:
- None
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
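An illustrative sketch (assumes `flow` is a Flow and that `upstream`,
`task` and `downstream` are Task instances, with `task.run` accepting
an `x` keyword argument):

```
task.set_dependencies(
    flow=flow,
    upstream_tasks=[upstream],
    downstream_tasks=[downstream],
    keyword_tasks={"x": upstream},
)
```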
"""
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError(
"No Flow was passed, and could not infer an active Flow context."
)
flow.set_dependencies(
task=self,
upstream_tasks=upstream_tasks,
downstream_tasks=downstream_tasks,
keyword_tasks=keyword_tasks,
validate=validate,
mapped=mapped,
)
def set_upstream(
self, task: object, flow: "Flow" = None, key: str = None, mapped: bool = False
) -> None:
"""
Sets the provided task as an upstream dependency of this task.
Args:
- task (object): A task or object that will be converted to a task and set
as an upstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- key (str, optional): The key to be set for the new edge; the result of the upstream task
will be passed to this task's `run()` method under this keyword argument.
- mapped (bool, optional): Whether this dependency is mapped; defaults to `False`
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
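An illustrative sketch (assumes `extract` and `transform` are Task
instances and that `transform.run` accepts a `data` keyword argument):

```
from prefect import Flow

with Flow("etl") as flow:
    transform.set_upstream(extract, key="data")
```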
"""
if key is not None:
keyword_tasks = {key: task}
self.set_dependencies(flow=flow, keyword_tasks=keyword_tasks, mapped=mapped)
else:
self.set_dependencies(flow=flow, upstream_tasks=[task], mapped=mapped)
def set_downstream(
self, task: "Task", flow: "Flow" = None, key: str = None, mapped: bool = False
) -> None:
"""
Sets the provided task as a downstream dependency of this task.
Args:
- task (Task): A task that will be set as a downstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- key (str, optional): The key to be set for the new edge; the result of this task
will be passed to the downstream task's `run()` method under this keyword argument.
- mapped (bool, optional): Whether this dependency is mapped; defaults to `False`
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
if key is not None:
keyword_tasks = {key: self}
task.set_dependencies( # type: ignore
flow=flow, keyword_tasks=keyword_tasks, mapped=mapped
) # type: ignore
else:
task.set_dependencies(flow=flow, upstream_tasks=[self], mapped=mapped)
def inputs(self) -> Dict[str, Dict]:
"""
Describe the inputs for this task. The result is a dictionary that maps each input to
a `type`, `required`, and `default`. All values are inferred from the `run()`
signature; this method can be overloaded for more precise control.
Returns:
- dict
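For example, a task whose run method is defined as
`def run(self, x: int, y: int = 1)` would report (roughly):

```
{
    "x": {"type": int, "default": None, "required": True},
    "y": {"type": int, "default": 1, "required": False},
}
```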
"""
inputs = {}
for name, parameter in inspect.signature(self.run).parameters.items():
input_type = parameter.annotation
if input_type is inspect._empty: # type: ignore
input_type = Any
input_default = parameter.default
input_required = False
if input_default is inspect._empty: # type: ignore
input_required = True
input_default = None
inputs[name] = dict(
type=input_type, default=input_default, required=input_required
)
return inputs
def outputs(self) -> Any:
"""
Get the output types for this task.
Returns:
- Any
"""
return_annotation = inspect.signature(self.run).return_annotation
if return_annotation is inspect._empty: # type: ignore
return_annotation = Any
return return_annotation
# Serialization ------------------------------------------------------------
def serialize(self) -> Dict[str, Any]:
"""
Creates a serialized representation of this task
Returns:
- dict representing this task
"""
return prefect.serialization.task.TaskSchema().dump(self)
# Operators ----------------------------------------------------------------
def is_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self == other`
This can't be implemented as the __eq__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Equal().bind(self, other)
def is_not_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self != other`
This can't be implemented as the __ne__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.NotEqual().bind(self, other)
def not_(self) -> "Task":
"""
Produces a Task that evaluates `not self`
Returns:
- Task
"""
return prefect.tasks.core.operators.Not().bind(self)
def or_(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self or other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Or().bind(self, other)
# Magic Method Interactions ----------------------------------------------------
def __getitem__(self, key: Any) -> "Task":
"""
Produces a Task that evaluates `self[key]`
Args:
- key (object): the object to use as an index for this task. It will be converted
to a Task if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GetItem().bind(self, key)
def __or__(self, other: object) -> object:
"""
Creates a state dependency between `self` and `other`
`self | other --> self.set_dependencies(downstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task (if it isn't one already)
and set as a downstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(downstream_tasks=[other])
return other
def __mifflin__(self) -> None: # coverage: ignore
"Calls Dunder Mifflin"
import webbrowser
webbrowser.open("https://cicdw.github.io/welcome.html")
def __ror__(self, other: object) -> "Task":
"""
Creates a state dependency between `self` and `other`:
`other | self --> self.set_dependencies(upstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task and set as an
upstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(upstream_tasks=[other])
return self
# Magic Method Operators ------------------------------------------------------
def __add__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self + other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(self, other)
def __sub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self - other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(self, other)
def __mul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self * other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(self, other)
def __truediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self / other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(self, other)
def __floordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self // other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(self, other)
def __mod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self % other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(self, other)
def __pow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self ** other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(self, other)
def __and__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self & other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(self, other)
def __radd__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other + self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(other, self)
def __rsub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other - self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(other, self)
def __rmul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other * self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(other, self)
def __rtruediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other / self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(other, self)
def __rfloordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other // self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(other, self)
def __rmod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other % self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(other, self)
def __rpow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other ** self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(other, self)
def __rand__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other & self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(other, self)
def __gt__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self > other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GreaterThan().bind(self, other)
def __ge__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self >= other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GreaterThanOrEqual().bind(self, other)
def __lt__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self < other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.LessThan().bind(self, other)
def __le__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self <= other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.LessThanOrEqual().bind(self, other)
# All keyword-only arguments to Task.__call__, used for dynamically generating
# Signature objects for Task objects
EXTRA_CALL_PARAMETERS = [
p
for p in inspect.Signature.from_callable(Task.__call__).parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
]
# DEPRECATED - this is to allow backwards-compatible access to Parameters
# https://github.com/PrefectHQ/prefect/pull/2758
from .parameter import Parameter as _Parameter
class Parameter(_Parameter):
def __new__(cls, *args, **kwargs): # type: ignore
warnings.warn("`Parameter` has moved, please import as `prefect.Parameter`")
return super().__new__(cls)
|
py | 1a3e897a1a3341160c0b6e9233a242bc70f63d01 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNatGatewayResult',
'AwaitableGetNatGatewayResult',
'get_nat_gateway',
]
@pulumi.output_type
class GetNatGatewayResult:
"""
Nat Gateway resource.
"""
def __init__(__self__, etag=None, id=None, idle_timeout_in_minutes=None, location=None, name=None, provisioning_state=None, public_ip_addresses=None, public_ip_prefixes=None, resource_guid=None, sku=None, subnets=None, tags=None, type=None, zones=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_addresses and not isinstance(public_ip_addresses, list):
raise TypeError("Expected argument 'public_ip_addresses' to be a list")
pulumi.set(__self__, "public_ip_addresses", public_ip_addresses)
if public_ip_prefixes and not isinstance(public_ip_prefixes, list):
raise TypeError("Expected argument 'public_ip_prefixes' to be a list")
pulumi.set(__self__, "public_ip_prefixes", public_ip_prefixes)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if subnets and not isinstance(subnets, list):
raise TypeError("Expected argument 'subnets' to be a list")
pulumi.set(__self__, "subnets", subnets)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The idle timeout of the nat gateway.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the NAT gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIpAddresses")
def public_ip_addresses(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
"""
An array of public ip addresses associated with the nat gateway resource.
"""
return pulumi.get(self, "public_ip_addresses")
@property
@pulumi.getter(name="publicIpPrefixes")
def public_ip_prefixes(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
"""
An array of public ip prefixes associated with the nat gateway resource.
"""
return pulumi.get(self, "public_ip_prefixes")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the NAT gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.NatGatewaySkuResponse']:
"""
The nat gateway SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def subnets(self) -> Sequence['outputs.SubResourceResponse']:
"""
An array of references to the subnets using this nat gateway resource.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
A list of availability zones denoting the zone in which Nat Gateway should be deployed.
"""
return pulumi.get(self, "zones")
class AwaitableGetNatGatewayResult(GetNatGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNatGatewayResult(
etag=self.etag,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
public_ip_addresses=self.public_ip_addresses,
public_ip_prefixes=self.public_ip_prefixes,
resource_guid=self.resource_guid,
sku=self.sku,
subnets=self.subnets,
tags=self.tags,
type=self.type,
zones=self.zones)
def get_nat_gateway(expand: Optional[str] = None,
nat_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNatGatewayResult:
"""
Gets the specified nat gateway resource in the given resource group.
:param str expand: Expands referenced resources.
:param str nat_gateway_name: The name of the nat gateway.
:param str resource_group_name: The name of the resource group.
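Example (an illustrative sketch; the module path follows this file's location
and the resource names are placeholders):

    import pulumi
    import pulumi_azure_nextgen.network.v20190701 as network

    gw = network.get_nat_gateway(
        nat_gateway_name="my-nat-gateway",
        resource_group_name="my-resource-group")
    pulumi.export("natGatewayId", gw.id)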
"""
__args__ = dict()
__args__['expand'] = expand
__args__['natGatewayName'] = nat_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20190701:getNatGateway', __args__, opts=opts, typ=GetNatGatewayResult).value
return AwaitableGetNatGatewayResult(
etag=__ret__.etag,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
public_ip_addresses=__ret__.public_ip_addresses,
public_ip_prefixes=__ret__.public_ip_prefixes,
resource_guid=__ret__.resource_guid,
sku=__ret__.sku,
subnets=__ret__.subnets,
tags=__ret__.tags,
type=__ret__.type,
zones=__ret__.zones)
|
py | 1a3e89849dd10649c760a1292d0d54b23bb2719c | class A:
    x = 1
    z = 2  # z must be defined in this scope before it is used below
    y = x + z
A()
|
py | 1a3e8ae68ac09b1bc822b58baf76ba3199664fb0 |
import numpy as np
import tensornetwork as tn
import itertools as itt
#from scipy.sparse import linalg as la
#import matrixproductstates as mp
import scipy as SP
import pymps as mp
def kdelta(i,j):
"""
Parameters
----------
i : int
State index i.
j : int
State index j.
Returns
-------
int
Kronecker_Delta(i,j).
"""
return int(i==j)
# Construct MPS for 4 sites
bond_dim=2
n_sites = 4
mps = mp.init_wavefunction(n_sites)
#
# Creating the things we need for the Hamiltonian
#============================
from scipy import linalg as LA
from scipy import special as sp
#FUNDAMENTAL CONSTANTS
hbar = 6.582119569e-16 #eV*s
m_e = 0.51099895000e6 #eV/c^2
m_eff = 0.067
c_light = 299792458 #m/s
bohr = 5.7883818060e-2 #meV/T
lande = 0.52
rydberg=5.93 #meV
#=======
def vmatrix(n1l,m1l,n2l,m2l,n2r,m2r,n1r,m1r):
"""
Computes Coulomb matrix elements for a parabolic quantum dot. The analytic formula,
derived by Marek Korkusinski, can be found here: https://mysite.science.uottawa.ca/phawrylak/member_pages/korkusinski/thesis/thesis.html.
Computes <n1l m1l, n2l m2l|V|n2r m2r, n1r m1r>
Parameters
----------
n1l : int
index of Landau level of electron 1 for initial state.
m1l : int
index of quasidegenerate orbital of electron 1 for initial state.
n2l : int
index of Landau level of electron 2 for initial state.
m2l : int
index of quasidegenerate orbital of electron 1 for initial state.
n2r : int
index of Landau level of electron 2 for final state.
m2r : int
index of quasidegenerate orbital of electron 2 for final state.
n1r : int
index of Landau level of electron 1 for final state.
m1r : int
index of quasidegenerate orbital of electron 1 for final state.
Returns
-------
vmatel : float
<n1l m1l, n2l m2l|V|n2r m2r, n1r m1r>
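Examples
--------
With all eight indices equal to zero the p-sums collapse to the single
p1=p2=p3=p4=0 term, so, as a quick sanity check,

    vmatrix(0, 0, 0, 0, 0, 0, 0, 0)

evaluates to 1 (up to floating-point rounding) before the E_0 scaling
applied further below.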
"""
delta_rl_rr = kdelta((m1l+m2l)-(n1l+n2l),(m1r+m2r)-(n1r+n2r))
fac_denom = np.sqrt(sp.factorial(n1l)*sp.factorial(m1l)*\
sp.factorial(n1r)*sp.factorial(m1r)*\
sp.factorial(n2l)*sp.factorial(m2l)*\
sp.factorial(n2r)*sp.factorial(m2r))
phase = (-1)**(n2l+m2l+n2r+m2r)
total = 0.
for p1 in range(min(n1l,n1r)+1):
for p2 in range(min(m1l,m1r)+1):
for p3 in range(min(n2l,n2r)+1):
for p4 in range(min(m2l,m2r)+1):
power = n1l + n2l + m1r + m2r - (p1+p2+p3+p4)
p1fac=sp.factorial(p1)*sp.binom(n1l,p1)*sp.binom(n1r,p1)
p2fac=sp.factorial(p2)*sp.binom(m1l,p2)*sp.binom(m1r,p2)
p3fac=sp.factorial(p3)*sp.binom(n2l,p3)*sp.binom(n2r,p3)
p4fac=sp.factorial(p4)*sp.binom(m2l,p4)*sp.binom(m2r,p4)
gammafac=(-0.5)**power*sp.gamma(power+0.5)
total+=p1fac*p2fac*p3fac*p4fac*gammafac
vmatel = delta_rl_rr*phase*total/(fac_denom*np.sqrt(np.pi))
return vmatel
def sp_energies(n,m,B,spin,hbar,m_e,m_eff,c_light,bohr,lande,rydberg,omega_0,omega_c):
"""
Parameters
----------
n : int
Landau level.
m : int
Sub orbital in Landau level n.
B : float
Magnetic field in T.
spin : float
Spin of electron.
hbar : float
Planck's constant.
m_e : float
Mass of electron.
m_eff : float
Effective mass of electron.
c_light : float
Speed of light.
bohr : float
Bohr radius.
lande : float
g-factor.
rydberg : float
Rydberg energy.
omega_0 : float
Characteristic frequency of harmonic oscillator.
omega_c : float
Cyclotron frequency.
Returns
-------
energy : float
Single particle energy.
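Notes
-----
The returned value follows the Fock-Darwin-like spectrum implemented below:

    E(n, m, s) = omega_p*(n + 1/2) + omega_m*(m + 1/2) - lande*bohr*B*s

with omega_p/omega_m = sqrt(omega_0**2 + omega_c**2/4) +/- omega_c/2.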
"""
omega_p=np.sqrt(omega_0**2+0.25*omega_c**2)+0.5*omega_c
omega_m=np.sqrt(omega_0**2+0.25*omega_c**2)-0.5*omega_c
energy = omega_p*(n+0.5)+omega_m*(m+0.5)-lande*bohr*B*spin
return energy
B=10
omega_0 = 3.31 #meV
omega_c = 1e3*hbar*B*c_light**2/(m_e*m_eff)
OMEGA_H = np.sqrt(omega_0**2+0.25*omega_c**2)/rydberg
E_0=np.sqrt(np.pi*OMEGA_H)*rydberg
epsilon = []
for m in range(4):
epsilon.append(sp_energies(1,m,B,0.5,hbar,m_e,m_eff,c_light,bohr,lande,rydberg,omega_0,omega_c))
v12=(vmatrix(1, 0, 1, 1, 1, 1, 1, 0)-vmatrix(1, 0, 1, 1, 1, 0, 1, 1))*E_0
v13=(vmatrix(1, 0, 1, 2, 1, 2, 1, 0)-vmatrix(1, 0, 1, 2, 1, 0, 1, 2))*E_0
v14=(vmatrix(1, 0, 1, 3, 1, 3, 1, 0)-vmatrix(1, 0, 1, 3, 1, 0, 1, 3))*E_0
v23=(vmatrix(1, 1, 1, 2, 1, 2, 1, 1)-vmatrix(1, 1, 1, 2, 1, 1, 1, 2))*E_0
v24=(vmatrix(1, 1, 1, 3, 1, 3, 1, 1)-vmatrix(1, 1, 1, 3, 1, 1, 1, 3))*E_0
v34=(vmatrix(1, 2, 1, 3, 1, 3, 1, 2)-vmatrix(1, 2, 1, 3, 1, 2, 1, 3))*E_0
w=(vmatrix(1, 0, 1, 3, 1, 1, 1, 2)-vmatrix(1, 0, 1, 3, 1, 2, 1, 1))*E_0
# Create H MPO
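# G0..G3 below are the four MPO tensors of the Hamiltonian, one per orbital
# (physical dimension 2 for the occupation of each site). G0 and G3 are the
# boundary tensors (bond dimension 4), while G1 and G2 carry the larger
# internal bond (dimension 6) needed to pass along the single-particle
# energies epsilon[m], the density-density interactions v_ij, and the
# off-diagonal w term that scatters a pair between orbitals (1,2) and (0,3).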
G0 = np.array([[[0.]*4]*2]*2)
G1 = np.array([[[[0.]*6]*4]*2]*2)
G2 = np.array([[[[0.]*4]*6]*2]*2)
G3 = np.array([[[0.]*4]*2]*2)
for n0p in range(2):
for n0 in range(2):
G0[n0p,n0]=np.array([n0*kdelta(n0p,n0),kdelta(n0p,n0),kdelta(n0p-1,n0),kdelta(n0p,n0-1)])
for n1p in range(2):
for n1 in range(2):
G1[n1p,n1]=np.array([[v14*kdelta(n1p,n1),0,epsilon[0]*kdelta(n1p,n1)+v12*n1*kdelta(n1p,n1),v13*kdelta(n1p,n1),0,0]\
,[epsilon[3]*kdelta(n1p,n1)+v24*n1*kdelta(n1p,n1),v34*kdelta(n1p,n1),epsilon[1]*n1*kdelta(n1p,n1),epsilon[2]*\
kdelta(n1p,n1)+v23*n1*kdelta(n1p,n1),0,0],\
[0,0,0,0,-w*kdelta(n1p,n1-1),0],[0,0,0,0,0,-w*kdelta(n1p-1,n1)]])
for n2p in range(2):
for n2 in range(2):
G2[n2p,n2]=np.array([[kdelta(n2p,n2),0,0,0],[n2*kdelta(n2p,n2),0,0,0],[0,kdelta(n2p,n2),0,0],[0,n2*kdelta(n2p,n2),0,0],\
[0,0,kdelta(n2p,n2-1),0],[0,0,0,kdelta(n2p-1,n2)]])
for n3p in range(2):
for n3 in range(2):
G3[n3p,n3]=np.array([n3*kdelta(n3p,n3),kdelta(n3p,n3),kdelta(n3p-1,n3),kdelta(n3p,n3-1)])
#Create the chemical potential MPO
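# W0..W3 build the MPO of chem_pot * (n0 + n1 + n2 + n3), i.e. a chemical
# potential coupled to the total particle number. Adding it to H biases
# DMRG toward the desired particle-number sector; the number_e * chem_pot
# contribution is subtracted back out when the corrected energy is printed.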
W0 = np.array([[[0.]*2]*2]*2)
W1 = np.array([[[[0.]*2]*2]*2]*2)
W2 = np.array([[[[0.]*2]*2]*2]*2)
W3 = np.array([[[0.]*2]*2]*2)
chem_pot=-35
for n0p in range(2):
for n0 in range(2):
W0[n0p,n0]=np.array([n0*kdelta(n0p,n0),kdelta(n0p,n0)])*chem_pot
for n1p in range(2):
for n1 in range(2):
W1[n1p,n1]=np.array([[kdelta(n1p,n1),0.],[n1*kdelta(n1p,n1),kdelta(n1p,n1)]])
for n2p in range(2):
for n2 in range(2):
W2[n2p,n2]=np.array([[kdelta(n2p,n2),0.],[n2*kdelta(n2p,n2),kdelta(n2p,n2)]])
for n3p in range(2):
for n3 in range(2):
W3[n3p,n3]=np.array([kdelta(n3p,n3),n3*kdelta(n3p,n3)])
O0 = np.array([[[0.]*6]*2]*2)
O1 = np.array([[[[0.]*8]*6]*2]*2)
O2 = np.array([[[[0.]*6]*8]*2]*2)
O3 = np.array([[[0.]*6]*2]*2)
for n0p in range(2):
for n0 in range(2):
O0[n0p,n0]=np.hstack((G0[n0p,n0],W0[n0p,n0]))
for n1p in range(2):
for n1 in range(2):
O1[n1p,n1]=SP.linalg.block_diag(G1[n1p,n1],W1[n1p,n1])
for n2p in range(2):
for n2 in range(2):
O2[n2p,n2]=SP.linalg.block_diag(G2[n2p,n2],W2[n2p,n2])
for n3p in range(2):
for n3 in range(2):
O3[n3p,n3]=np.hstack((G3[n3p,n3],W3[n3p,n3]))
#Creating MPO as a tensornetwork
hmpo = [ \
tn.Node(O0,axis_names=["n_0p","n_0","i_0"] )] + \
[tn.Node(O1,axis_names=["n_1p","n_1","i_0","i_1"])] + \
[tn.Node(O2,axis_names=["n_2p","n_2","i_1","i_2"])] + \
[tn.Node(O3,axis_names=["n_3p","n_3","i_2"])]
# Connect edges to build MPO
connected_edges2=[]
conn2=hmpo[0]["i_0"]^hmpo[1]["i_0"]
connected_edges2.append(conn2)
conn2=hmpo[1]["i_1"]^hmpo[2]["i_1"]
connected_edges2.append(conn2)
conn2=hmpo[2]["i_2"]^hmpo[3]["i_2"]
connected_edges2.append(conn2)
#Run DMRG algorithm
energy,energies,MPS=mp.DMRG(4,hmpo,10,mps)
MPS[0]["i_0"]^MPS[1]["i_0"]
MPS[1]["i_1"]^MPS[2]["i_1"]
MPS[2]["i_2"]^MPS[3]["i_2"]
test=MPS[0]@MPS[1]@MPS[2]@MPS[3]
np.transpose(np.where(np.abs(test.tensor)>=1e-10))[0]
number_e=np.count_nonzero(np.transpose(np.where(np.abs(test.tensor)>=1e-10))[0])
print('Corrected Energy = {}'.format(energy-number_e*chem_pot))
|
py | 1a3e8aee080da409251d5a1e7c77ff6171ad64d3 | '''Improve challenge 028, where the computer
will 'think' of a number between 0 and 10.
This time the player keeps guessing until
getting it right, and at the end the program
shows how many guesses were needed to win.'''
from random import choice
lista = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
random = choice(lista)  # the computer picks its number once, before the guessing loop
cont = 0
while True:
    num = int(input('Type an integer between 0 and 10: '))
    cont = cont + 1
    if num == random:
        print('You needed {} guess(es) to get it right.'.format(cont))
        break
    print('Wrong guess, try again.') |
py | 1a3e8b6f811344d3cd5e7e08fc4db2dfb6cfa290 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ComplianceStatusResponse',
'HelmOperatorPropertiesResponse',
]
@pulumi.output_type
class ComplianceStatusResponse(dict):
"""
Compliance Status details
"""
def __init__(__self__, *,
compliance_state: str,
last_config_applied: Optional[str] = None,
message: Optional[str] = None,
message_level: Optional[str] = None):
"""
Compliance Status details
:param str compliance_state: The compliance state of the configuration.
:param str last_config_applied: Datetime the configuration was last applied.
:param str message: Message from when the configuration was applied.
:param str message_level: Level of the message.
"""
pulumi.set(__self__, "compliance_state", compliance_state)
if last_config_applied is not None:
pulumi.set(__self__, "last_config_applied", last_config_applied)
if message is not None:
pulumi.set(__self__, "message", message)
if message_level is not None:
pulumi.set(__self__, "message_level", message_level)
@property
@pulumi.getter(name="complianceState")
def compliance_state(self) -> str:
"""
The compliance state of the configuration.
"""
return pulumi.get(self, "compliance_state")
@property
@pulumi.getter(name="lastConfigApplied")
def last_config_applied(self) -> Optional[str]:
"""
Datetime the configuration was last applied.
"""
return pulumi.get(self, "last_config_applied")
@property
@pulumi.getter
def message(self) -> Optional[str]:
"""
Message from when the configuration was applied.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter(name="messageLevel")
def message_level(self) -> Optional[str]:
"""
Level of the message.
"""
return pulumi.get(self, "message_level")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class HelmOperatorPropertiesResponse(dict):
"""
Properties for Helm operator.
"""
def __init__(__self__, *,
chart_values: Optional[str] = None,
chart_version: Optional[str] = None):
"""
Properties for Helm operator.
:param str chart_values: Values override for the operator Helm chart.
:param str chart_version: Version of the operator Helm chart.
"""
if chart_values is not None:
pulumi.set(__self__, "chart_values", chart_values)
if chart_version is not None:
pulumi.set(__self__, "chart_version", chart_version)
@property
@pulumi.getter(name="chartValues")
def chart_values(self) -> Optional[str]:
"""
Values override for the operator Helm chart.
"""
return pulumi.get(self, "chart_values")
@property
@pulumi.getter(name="chartVersion")
def chart_version(self) -> Optional[str]:
"""
Version of the operator Helm chart.
"""
return pulumi.get(self, "chart_version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|