the-stack_0_23333 | import os
import numpy as np
import gym
import mujoco_py
from gym_kuka_mujoco.envs.assets import kuka_asset_dir
from gym_kuka_mujoco.utils.mujoco_utils import kuka_subtree_mass, get_qpos_indices, get_qvel_indices, get_actuator_indices, get_joint_indices
from .base_controller import BaseController
from . import register_controller
class DirectTorqueController(BaseController):
'''
A simple controller that takes raw torque actions.
'''
def __init__(self,
sim,
action_scaling=10.,
gravity_comp_model_path=None,
controlled_joints=None):
super(DirectTorqueController, self).__init__(sim)
self.gravity_comp = False
if gravity_comp_model_path is not None:
self.gravity_comp = True
model_path = os.path.join(kuka_asset_dir(), gravity_comp_model_path)
self.model = mujoco_py.load_model_from_path(model_path)
self.gravity_comp_sim = mujoco_py.MjSim(self.model)
assert self.model.nv == self.sim.model.nv, \
"the model for control and simulation must have the same number of DOF"
# Get the position, velocity, and actuator indices for the model.
if controlled_joints is not None:
self.sim_qpos_idx = get_qpos_indices(sim.model, controlled_joints)
self.sim_qvel_idx = get_qvel_indices(sim.model, controlled_joints)
self.sim_actuators_idx = get_actuator_indices(sim.model, controlled_joints)
self.sim_joint_idx = get_joint_indices(sim.model, controlled_joints)
self.self_qpos_idx = get_qpos_indices(self.model, controlled_joints)
self.self_qvel_idx = get_qvel_indices(self.model, controlled_joints)
self.self_actuators_idx = get_actuator_indices(self.model, controlled_joints)
else:
assert self.model.nv == self.model.nu, "if the number of degrees of freedom is different than the number of actuators you must specify the controlled_joints"
self.sim_qpos_idx = range(self.model.nq)
self.sim_qvel_idx = range(self.model.nv)
self.sim_actuators_idx = range(self.model.nu)
self.sim_joint_idx = range(self.model.nu)
self.self_qpos_idx = range(self.model.nq)
self.self_qvel_idx = range(self.model.nv)
self.self_actuators_idx = range(self.model.nu)
# Scale the actions proportionally to the subtree mass.
true_subtree_mass = kuka_subtree_mass(sim.model)
normalized_subtree_mass = true_subtree_mass / np.max(true_subtree_mass)
self.action_scaling = action_scaling * normalized_subtree_mass
# Scale the action space to the new scaling.
low = sim.model.actuator_ctrlrange[:, 0]/action_scaling
high = sim.model.actuator_ctrlrange[:, 1]/action_scaling
self.action_space = gym.spaces.Box(low, high, dtype=np.float32)
def set_action(self, action):
self.torque = action*self.action_scaling
def get_torque(self):
torque = self.torque.copy()
# Add gravity compensation if necessary
if self.gravity_comp:
self.gravity_comp_sim.data.qpos[:] = self.sim.data.qpos[:].copy()
self.gravity_comp_sim.data.qvel[:] = np.zeros(self.model.nv)
self.gravity_comp_sim.data.qacc[:] = np.zeros(self.model.nv)
mujoco_py.functions.mj_inverse(self.model, self.gravity_comp_sim.data)
torque += self.gravity_comp_sim.data.qfrc_inverse[self.sim_actuators_idx].copy()
return torque
class SACTorqueController(DirectTorqueController):
'''
A torque controller identical to DirectTorqueController, except that the action space limits are scaled down by ``action_limit`` (useful for algorithms such as SAC).
'''
def __init__(self, sim, action_limit=1., **kwargs):
super(SACTorqueController, self).__init__(sim, **kwargs)
# Reduce the torque limits.
limited_low = self.action_space.low*action_limit
limited_high = self.action_space.high*action_limit
self.action_space = gym.spaces.Box(limited_low, limited_high, dtype=np.float32)
register_controller(DirectTorqueController, 'DirectTorqueController')
register_controller(SACTorqueController, 'SACTorqueController') |
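# Illustrative sketch (hypothetical masses, not from the real Kuka model): the
# controller above scales raw actions per joint in proportion to the normalized
# subtree mass, so heavier links receive proportionally larger torques.
import numpy as np

action_scaling = 10.0
true_subtree_mass = np.array([12.0, 6.0, 1.5])      # hypothetical subtree masses
per_joint_scaling = action_scaling * true_subtree_mass / np.max(true_subtree_mass)
action = np.array([0.5, -1.0, 0.25])                # raw policy action
torque = action * per_joint_scaling                 # -> [5., -5., 0.3125]
print(per_joint_scaling, torque)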
the-stack_0_23334 | import ptCrypt.Math.base as base
class Curve:
class Point:
ZERO = 'O'
def __init__(self, curve, x, y):
self.curve = curve
self.x = x
self.y = y
def __add__(self, other):
assert self.curve == other.curve
if self.x == 'O':
return other
if other.x == 'O':
return self
if self == -other:
return self.curve.point('O', 'O')
l = 0
if self != other:
low = other.x - self.x
if self.curve.p:
low = low % self.curve.p
r, a, b = base.egcd(low, self.curve.p)
if r != 1: return r
l = (other.y - self.y) * a % self.curve.p
else:
l = (other.y - self.y) / low
else:
low = 2 * self.y
if self.curve.p:
low = low % self.curve.p
r, a, b = base.egcd(low % self.curve.p, self.curve.p)
if r != 1: return r
l = (3 * pow(self.x, 2) + self.curve.a) * a % self.curve.p
else:
l = (3 * pow(self.x, 2) + self.curve.a) / (2 * self.y)
x3 = pow(l, 2) - self.x - other.x
y3 = l * (self.x - x3) - self.y
if self.curve.p:
x3 = x3 % self.curve.p
y3 = y3 % self.curve.p
return self.curve.point(x3, y3)
def __sub__(self, other):
return self + (-other)
def __neg__(self):
return self.curve.point(self.x, -self.y)
def __repr__(self):
return f"({self.x}, {self.y})"
def __eq__(self, other):
return self.curve == other.curve and self.x == other.x and self.y == other.y
def __mul__(self, number):
Q = self
R = self.curve.point(Curve.Point.ZERO, Curve.Point.ZERO)
n = number
while n > 0:
if n % 2:
R = R + Q
if type(R) is int:
return R
Q = Q + Q
if type(Q) is int:
return Q
n = n >> 1
return R
def __rmul__(self, number):
return self * number
def __init__(self, a, b, p=None):
self.a = a
self.b = b
self.p = p
if self.hasSingularPoints():
print(f"[WARNING] Curve {self} has singular points")
def __eq__(self, other):
return self.a == other.a and self.b == other.b and self.p == other.p
def __repr__(self):
res = "x^3"
if self.a >= 0:
res += f"+{self.a}x"
else:
res += f"{self.a}x"
if self.b >= 0:
res += f"+{self.b}"
else:
res += f"{self.b}"
if self.p:
return res + f" over F({self.p})"
else:
return res
def point(self, x, y):
return Curve.Point(self, x, y)
def hasSingularPoints(self):
return 4 * pow(self.a, 3) + 27 * pow(self.b, 2) == 0
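# Usage sketch (assumes ptCrypt's base.egcd is importable). The curve
# y^2 = x^3 + 2x + 2 over F_17 with generator P = (5, 1) is a common
# textbook example; expected values are shown in the comments.
if __name__ == "__main__":
    curve = Curve(2, 2, p=17)
    P = curve.point(5, 1)
    print(P + P)    # (6, 3)
    print(3 * P)    # (10, 6), computed by double-and-add in __mul__/__rmul__
    print(P - P)    # (O, O), the point at infinity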
|
the-stack_0_23335 | # python3 DataPreparation.py
# -*- coding: utf-8 -*-
# ===========================================================================================
# Created by: Ann-Kathrin Jauk
# Description: Reads in data from csv/json files, converts datatypes where needed (e.g.
# with date string to datetime), drops columns/nan-values and returns pandas dataframe
# ===========================================================================================
import numpy as np
import DataProcessing.DataGenerators as dg
import pandas as pd
import DataProcessing.DataPreparation as dp
def prepareWeatherData():
'''
Reads weather data from csv, drops unnecessary columns, returns dataframe
:return: weather: (pandas.dataframe)
prepared weather data
'''
# columnnames: date, tavg (average temperature), tmin (min. temp.), tmax (max. temp.),
# prcp (overall precipitation/Gesamtniederschlag), snow, wdir (wind direction),
# wspd (wind speed), wpgt (wind peak/Spitzenboe), pres (pressure/Luftdruck),
# tsun (time of sunshine)
print("Preparing Weather Data")
weather = dg.gWeather.generateWeatherData()
# drop all columns except date, tavg, tmin and tmax
weather = weather.drop(columns=['prcp', 'snow', 'wdir', 'wspd', 'wpgt', 'pres', 'tsun'])
print("Finished")
return weather
def prepareArticlesData():
'''
Reads articles data from csv, replaces empty values with NaN, returns dataframe
:return: articles: (pandas.dataframe)
prepared articles data
'''
print("Preparing Articles Data")
# get ArticlesData (without parameter: use already generatedData
articles = dg.gArticles.generateArticlesData()
# replace empty/blank spaced values with NaN
articles = articles.replace(r'^\s*$', np.nan, regex=True)
print("Finished")
return articles
def prepareStockArticlesData():
'''
Reads stockarticles data from csv, merges with articles for calculation of Best-By-Date,
drops articles with NaN Best-By-Period, returns dataframe
:return: stock: (pandas.dataframe)
prepared stockarticles data
'''
print("Preparing Stockarticles Data")
# get stockArticles and ArticlesData (without parameter: use already generatedData, else True)
stockArticles = dg.gStockarticles.generateStockArticles(False)
articles = dg.gArticles.generateArticlesData()
# print(stockArticles)
# print(articles)
#drop and rename columns
articles = articles.drop(columns=['Article', 'Unit'])
articles = articles.rename(columns={'ID':'articleID'})
# merge on ArticleID
merged = pd.merge(stockArticles, articles, left_on='articleID', right_on='articleID')
merged = merged.rename(columns={'ID': 'stockarticleID'})
# drop nan a.k.a articles without Best By Period
merged = merged.dropna()
# calculate Best By Date
merged['productionDate'] = pd.to_datetime(merged['productionDate'])
merged['BestByDate'] = merged['productionDate'] + pd.to_timedelta(merged['Best By Period'], unit='d')
merged = merged.drop(columns=['Best By Period'])
merged = merged.sort_values(["articleID", "productionDate"]).reset_index(drop=True)
print("Finished")
# workaround for sums not being treated as objects but as numeric values
merged.to_csv('../Datasets/Stockarticles/stockarticles_prepared.csv', index=False)
stock = pd.read_csv('../Datasets/Stockarticles/stockarticles_prepared.csv', parse_dates=['productionDate',
'BestByDate'])
return stock
def prepareSalesData():
'''
Reads sales data from json, sums sales quantities per article per day, returns dataframe
:return: sales: (pandas.dataframe)
prepared sales data
'''
print("Preparing Sales Data")
#get SalesData (without parameter: use already generatedData
sales = dg.gSales.generateSalesData(False)
#Get unique dates of sales dataframe
dates = pd.unique(sales['date'])
#Returns list of unique articleIDs of a sales dataframe
def getIdListOf(df):
idList = []
for articles in df['soldArticles']:
for article in articles:
id = article['articleId']
if id not in idList:
idList.append(id)
return idList
#returns sums of quantities grouped by articleId
def getSumPerArticleOfDay(salesDay, idList):
articleQuantity = {}
for id in idList:
articleQuantity[id] = 0
for id in idList:
for articles in salesDay['soldArticles']:
for article in articles:
if id == article['articleId']:
articleQuantity[id] += article['quantity']
return articleQuantity
#idList of all sales
idListSales = getIdListOf(sales)
idListSales.sort()
##preparing prepared sales dataframe
#columnNames are date and all unique articleIDs
columnNames = ['date']
for id in idListSales:
columnNames.append(id)
#initializing new dataframe
preparedSales = pd.DataFrame(columns=columnNames)
#using unique dates for 'date' column
preparedSales['date'] = dates
#mapping sales summed per day on new dataframe with unique dates
row = 0
for date in dates:
df = sales.loc[(sales['date'] == date)]
idList = getIdListOf(df)
articleQuantity = getSumPerArticleOfDay(df, idList)
for key, value in articleQuantity.items():
if preparedSales['date'][row] == date:
if pd.isna(value):
value = np.nan
preparedSales.iloc[row, key] = value
row += 1
#changing article id columnnames to include "articleId_"
for index, name in enumerate(columnNames):
if type(name) == int:
columnNames[index] = 'articleId_' + str(name)
preparedSales.columns = columnNames
print("Finished")
# workaround for sums not being treated as objects but as numeric values
preparedSales.to_csv('../Datasets/Sales/sales_prepared.csv', index=False)
sales = pd.read_csv('../Datasets/Sales/sales_prepared.csv', parse_dates=['date'])
return sales |
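# Hedged alternative sketch: the same per-day/per-article aggregation that
# prepareSalesData builds with nested loops can be expressed with pandas
# explode/groupby. The input shape (a 'soldArticles' list of
# {'articleId', 'quantity'} dicts per date) is inferred from the loops above;
# the sample rows are made up.
import pandas as pd

sample = pd.DataFrame({
    "date": ["2021-01-01", "2021-01-02"],
    "soldArticles": [
        [{"articleId": 1, "quantity": 3}, {"articleId": 2, "quantity": 1}],
        [{"articleId": 1, "quantity": 5}],
    ],
})
exploded = sample.explode("soldArticles")
flat = pd.concat(
    [exploded[["date"]].reset_index(drop=True),
     pd.json_normalize(exploded["soldArticles"].tolist())],
    axis=1,
)
prepared = (flat.groupby(["date", "articleId"])["quantity"].sum()
                .unstack(fill_value=0)
                .add_prefix("articleId_")
                .reset_index())
print(prepared)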
the-stack_0_23336 | import asyncio
from blspy import G2Element
from clvm_tools import binutils
from taco.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from taco.rpc.full_node_rpc_client import FullNodeRpcClient
from taco.types.blockchain_format.program import Program
from taco.types.coin_solution import CoinSolution
from taco.types.condition_opcodes import ConditionOpcode
from taco.types.spend_bundle import SpendBundle
from taco.util.bech32m import decode_puzzle_hash
from taco.util.condition_tools import parse_sexp_to_conditions
from taco.util.config import load_config
from taco.util.default_root import DEFAULT_ROOT_PATH
from taco.util.ints import uint32, uint16
def print_conditions(spend_bundle: SpendBundle):
print("\nConditions:")
for coin_solution in spend_bundle.coin_solutions:
result = Program.from_bytes(bytes(coin_solution.puzzle_reveal)).run(
Program.from_bytes(bytes(coin_solution.solution))
)
error, result_human = parse_sexp_to_conditions(result)
assert error is None
assert result_human is not None
for cvp in result_human:
print(f"{ConditionOpcode(cvp.opcode).name}: {[var.hex() for var in cvp.vars]}")
print("")
async def main() -> None:
rpc_port: uint16 = uint16(8555)
self_hostname = "localhost"
path = DEFAULT_ROOT_PATH
config = load_config(path, "config.yaml")
client = await FullNodeRpcClient.create(self_hostname, rpc_port, path, config)
try:
farmer_prefarm = (await client.get_block_record_by_height(1)).reward_claims_incorporated[1]
pool_prefarm = (await client.get_block_record_by_height(1)).reward_claims_incorporated[0]
pool_amounts = int(calculate_pool_reward(uint32(0)) / 2)
farmer_amounts = int(calculate_base_farmer_reward(uint32(0)) / 2)
print(farmer_prefarm.amount, farmer_amounts)
assert farmer_amounts == farmer_prefarm.amount // 2
assert pool_amounts == pool_prefarm.amount // 2
address1 = "xtx1rdatypul5c642jkeh4yp933zu3hw8vv8tfup8ta6zfampnyhjnusxdgns6" # Key 1
address2 = "xtx1duvy5ur5eyj7lp5geetfg84cj2d7xgpxt7pya3lr2y6ke3696w9qvda66e" # Key 2
ph1 = decode_puzzle_hash(address1)
ph2 = decode_puzzle_hash(address2)
p_farmer_2 = Program.to(
binutils.assemble(f"(q . ((51 0x{ph1.hex()} {farmer_amounts}) (51 0x{ph2.hex()} {farmer_amounts})))")
)
p_pool_2 = Program.to(
binutils.assemble(f"(q . ((51 0x{ph1.hex()} {pool_amounts}) (51 0x{ph2.hex()} {pool_amounts})))")
)
print(f"Ph1: {ph1.hex()}")
print(f"Ph2: {ph2.hex()}")
assert ph1.hex() == "1b7ab2079fa635554ad9bd4812c622e46ee3b1875a7813afba127bb0cc9794f9"
assert ph2.hex() == "6f184a7074c925ef8688ce56941eb8929be320265f824ec7e351356cc745d38a"
p_solution = Program.to(binutils.assemble("()"))
sb_farmer = SpendBundle([CoinSolution(farmer_prefarm, p_farmer_2, p_solution)], G2Element())
sb_pool = SpendBundle([CoinSolution(pool_prefarm, p_pool_2, p_solution)], G2Element())
print("\n\n\nConditions")
print_conditions(sb_pool)
print("\n\n\n")
print("Farmer to spend")
print(sb_pool)
print(sb_farmer)
print("\n\n\n")
# res = await client.push_tx(sb_farmer)
# res = await client.push_tx(sb_pool)
# print(res)
up = await client.get_coin_records_by_puzzle_hash(farmer_prefarm.puzzle_hash, True)
uf = await client.get_coin_records_by_puzzle_hash(pool_prefarm.puzzle_hash, True)
print(up)
print(uf)
finally:
client.close()
asyncio.run(main())
|
the-stack_0_23337 | """
byceps.blueprints.site.user.avatar.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, g, request
from flask_babel import gettext
from .....services.image import service as image_service
from .....services.user_avatar import service as avatar_service
from .....signals import user_avatar as user_avatar_signals
from .....util.framework.blueprint import create_blueprint
from .....util.framework.flash import flash_notice, flash_success
from .....util.image.models import ImageType
from .....util.framework.templating import templated
from .....util.views import redirect_to, respond_no_content
from .forms import UpdateForm
blueprint = create_blueprint('user_avatar', __name__)
ALLOWED_IMAGE_TYPES = frozenset(
[
ImageType.jpeg,
ImageType.png,
ImageType.webp,
]
)
@blueprint.get('/me/avatar/update')
@templated
def update_form(erroneous_form=None):
"""Show a form to update the current user's avatar image."""
_get_current_user_or_404()
form = erroneous_form if erroneous_form else UpdateForm()
image_type_names = image_service.get_image_type_names(ALLOWED_IMAGE_TYPES)
return {
'form': form,
'allowed_types': image_type_names,
'maximum_dimensions': avatar_service.MAXIMUM_DIMENSIONS,
}
@blueprint.post('/me/avatar')
def update():
"""Update the current user's avatar image."""
user = _get_current_user_or_404()
# Make `InputRequired` work on `FileField`.
form_fields = request.form.copy()
if request.files:
form_fields.update(request.files)
form = UpdateForm(form_fields)
if not form.validate():
return update_form(form)
image = request.files.get('image')
_update(user.id, image)
flash_success(gettext('Avatar image has been updated.'), icon='upload')
user_avatar_signals.avatar_updated.send(None, user_id=user.id)
return redirect_to('user_settings.view')
def _update(user_id, image):
if not image or not image.filename:
abort(400, 'No file to upload has been specified.')
try:
avatar_service.update_avatar_image(
user_id, image.stream, ALLOWED_IMAGE_TYPES
)
except avatar_service.ImageTypeProhibited as e:
abort(400, str(e))
except FileExistsError:
abort(409, 'File already exists, not overwriting.')
@blueprint.delete('/me/avatar')
@respond_no_content
def delete():
"""Remove the current user's avatar image."""
user = _get_current_user_or_404()
try:
avatar_service.remove_avatar_image(user.id)
except ValueError:
# No avatar selected.
# But that's ok, deletions should be idempotent.
flash_notice(gettext('No avatar image is set that could be removed.'))
else:
flash_success(gettext('Avatar image has been removed.'))
def _get_current_user_or_404():
user = g.user
if not user.authenticated:
abort(404)
return user
|
the-stack_0_23338 | import os
import json
import argparse
import numpy as np
import pandas as pd
import matplotlib as mpl
from .base import Task, BalanceStrategy, ImpMethod, ClassificationScore, convert_model_name
from .preprocessing import prepare_data
from .evaluation import extract_target, get_data_splitter, make_scorer, sanitize_setup, run_cv
from .export import plot_performance_summary, store_individual_reports, plot_ranking_results, plot_rfe_scores
from .feature_selection import get_rfe_scores
mpl.rcParams.update({'font.size': 14, 'figure.figsize': (5, 5), 'savefig.bbox': 'tight'})
NUM_FOLDS = 5
RANDOM_SEED = 42
def run():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--path-to-input', type=str, required=True)
parser.add_argument('-o', '--out-dir', type=str, required=True)
parser.add_argument('-t', '--task', type=Task.argtype, choices=Task, default=Task.classification)
parser.add_argument('-n', '--fillna', type=ImpMethod.argtype, choices=ImpMethod, default=None)
parser.add_argument('-b', '--class-balance', type=BalanceStrategy.argtype, choices=BalanceStrategy, default=None)
parser.add_argument(
'-c', '--classification-score', type=ClassificationScore.argtype, choices=ClassificationScore, default=None
)
parser.add_argument('-r', '--random-seed', type=int, default=RANDOM_SEED)
parser.add_argument('-f', '--num-folds', type=int, default=NUM_FOLDS)
parser.add_argument('-S', '--save-individual', action='store_true')
args = parser.parse_args()
np.random.seed(args.random_seed)
task_id = 'regr' if args.task == Task.regression else 'clf'
# create an output directory
out_dir = f'{args.out_dir}/task_{task_id}'
os.makedirs(out_dir, exist_ok=True)
# load data
print(f'Loading `{os.path.basename(args.path_to_input)}`...')
df = pd.read_csv(args.path_to_input, index_col=0)
df = prepare_data(df, args.task, f'target_{task_id}', args.fillna, args.class_balance)
setup = json.load(open('protocols.json'))
# split the data
X, y = extract_target(df, f'target_{task_id}') # noqa
splitter = get_data_splitter(args.num_folds, args.random_seed)
# define experimental setup
scorer = make_scorer(args.task, args.classification_score)
setup = sanitize_setup(args, setup)
# train and test the models
reports_summary, reports_individual = run_cv(X, y, scorer, splitter, setup)
# save performance reports
reports_summary.to_csv(f'{out_dir}/summary.csv', sep=';') # noqa
store_individual_reports(reports_individual, out_dir, args.save_individual)
# visualize performance summary
plot_performance_summary(
summary=reports_summary,
score_name=scorer["name"],
out_dir=out_dir
)
# rank features using RFE-based-on-best-model
best_model_name = reports_summary.iloc[0].name
best_params = {**setup[best_model_name]['params_static'], **reports_summary.iloc[0].params}
rfe_scores, rfe_ranking = get_rfe_scores(X, y, args.task, best_model_name, best_params, scorer, splitter)
# save RFE summary
pd.DataFrame.from_records(
rfe_scores, index=list(range(1, len(rfe_scores) + 1)), columns=[f'split{n}' for n in range(args.num_folds)]
).to_csv(f'{out_dir}/scores_RFE-{convert_model_name(best_model_name)}.csv') # noqa
rfe_ranking.to_csv(f'{out_dir}/ranking_RFE-{convert_model_name(best_model_name)}.csv') # noqa
# visualize RFE summary
plot_rfe_scores(
rfe_scores,
model_name=f'RFE-{best_model_name}',
out_dir=out_dir
)
plot_ranking_results(
rfe_ranking,
model_name=f'RFE-{best_model_name}',
out_dir=out_dir
)
print(f'Process finished successfully. Results are stored in `{out_dir}`.')
if __name__ == '__main__':
run()
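# The RFE step performed by get_rfe_scores above is project-specific; the
# following is a generic, self-contained sketch of the same idea using
# scikit-learn's RFECV with placeholder data and a placeholder estimator
# (not the project's actual setup). It is defined as a helper and not called.
def _rfe_sketch():
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import RFECV
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import StratifiedKFold

    X, y = make_classification(n_samples=200, n_features=10, random_state=RANDOM_SEED)
    selector = RFECV(
        estimator=LogisticRegression(max_iter=1000),
        step=1,
        cv=StratifiedKFold(n_splits=NUM_FOLDS, shuffle=True, random_state=RANDOM_SEED),
        scoring="accuracy",
    )
    selector.fit(X, y)
    return selector.n_features_, selector.ranking_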
|
the-stack_0_23339 | import os, sys, copy
import pickle
import math
import time
import numpy as np
from typing import Dict, Any, List, Set, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pad_sequence
import torch.nn.utils.rnn as rnn_utils
from agent.environment.position import Position
from agent.environment import card as agent_cards
from . import util
from .map_transformations import pose as pose_lib
from .modules import state_embedder as embedder_lib
from .utilities import initialization
from .helpers import state_representation
from .utilities import hex_util
from .utilities.hex_conv_util import HexConv
def getPositionalEncoding(d_model=768, max_len=1024):
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
return pe
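# Quick sanity check for getPositionalEncoding (illustrative helper with small,
# arbitrary sizes): at position 0 the sine entries are 0 and the cosine entries
# are 1, and every value lies in [-1, 1].
def _positional_encoding_sanity_check():
    pe = getPositionalEncoding(d_model=8, max_len=4)
    assert pe.shape == (4, 8)
    assert torch.allclose(pe[0, 0::2], torch.zeros(4))   # sin(0) = 0
    assert torch.allclose(pe[0, 1::2], torch.ones(4))    # cos(0) = 1
    assert float(pe.abs().max()) <= 1.0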
def generate_attention_mask_from_mask_indicies_and_instruction_tensors(feature_attention_mask, instruction_tensors) -> torch.tensor:
attention_mask = torch.cat([feature_attention_mask, torch.ones(instruction_tensors.shape).to(util.DEVICE).bool()], 1)
return attention_mask
class CNNLSTMStateEncodingModel(nn.Module):
def __init__(self, config):
super(CNNLSTMStateEncodingModel, self).__init__()
self._d_input = 19
self._d_embed = config["d_embed"]
self._d_model = config["d_model"]
self._embeddings_type = config["embeddings_type"]
self._breakpoint_type = config["breakpoint_type"]
if self._breakpoint_type == "":
pass
elif self._breakpoint_type == "onehot":
self._d_input += 1
else:
raise ValueError("not supported breakpoint type")
self._conv = []
# embedding layer
self._n_depth = config["n_depth"]
if self._embeddings_type == "learned":
if "state_embedder_pretrained_model" in config:
pretrained_model = config["state_embedder_pretrained_model"]
else:
pretrained_model = ""
self._embedder = embedder_lib.StateEmbedder(
self._d_embed, pretrained_model)
self._d_input = self._embedder.embedding_size()
else:
if self._embeddings_type == "onehot":
self._embedder = embedder_lib.StateOnehotEmbedder()
self._d_input = self._embedder.embedding_size()
elif self._embeddings_type == "none":
self._embedder = None
if self._n_depth != 0:
conv_module = nn.ModuleList([])
conv_layer = nn.Conv2d(self._d_input, self._d_model, (1, 1))
conv_module.append(conv_layer)
conv_module.append(nn.LeakyReLU())
if torch.cuda.is_available():
conv_module = conv_module.to(util.DEVICE)
self._conv.append(conv_module)
# convolutional Layer
self._rcpf_size = config["rcpf_size"]
self._cnn_use_norm = config["cnn_use_norm"]
self._cnn_hex = config["cnn_hex"]
self._cnn_actv_func = config["cnn_actv_func"]
padding_size = int((self._rcpf_size-1)/2)
for d in range(self._n_depth-1):
conv_module = nn.ModuleList([])
if d == 0 and self._embeddings_type == "learned":
conv_in_channels = self._d_input
else:
conv_in_channels = self._d_model
if self._cnn_use_norm:
norm = nn.InstanceNorm2d(conv_in_channels)
conv_module.append(norm)
conv_out_channels: int = self._d_model
if self._cnn_hex:
conv_layer = HexConv(conv_in_channels, conv_out_channels,
self._rcpf_size, stride=1, padding=padding_size)
else:
conv_layer = nn.Conv2d(conv_in_channels, conv_out_channels,
(self._rcpf_size, self._rcpf_size), padding=(padding_size, padding_size))
conv_module.append(conv_layer)
if self._cnn_actv_func == "leaky_relu":
conv_module.append(nn.LeakyReLU())
elif self._cnn_actv_func == "tanh":
conv_module.append(nn.Tanh())
if torch.cuda.is_available():
conv_module = conv_module.to(util.DEVICE)
self._conv.append(conv_module)
if len(self._conv) == 0:
self._d_model = self._d_input
self._conv = nn.ModuleList(self._conv)
self._conv_output_channel = conv_out_channels
# feature translation and rotation layers
self._feature_map_size = config["feature_map_size"] if "feature_map_size" in config else 3
self._feature_filter_size = config["feature_filter_size"] if "feature_filter_size" in config else self._feature_map_size
self._rotate_feature_map = config["rotate_feature_map"] if "rotate_feature_map" in config else True
self._feature_cnn_n_depth = config["feature_cnn_n_depth"] if "feature_cnn_n_depth" in config else 0
self._feature_merge_type = config["feature_merge_type"] if "feature_merge_type" in config else "sum"
self._feature_output_dimension = config["feature_output_dimension"] if "feature_output_dimension" in config else 512
self._feature_cnn_actv_func = config["feature_cnn_actv_func"] if "feature_cnn_actv_func" in config else 0
self._feature_cnn_use_norm = config["feature_cnn_use_norm"] if "feature_cnn_use_norm" in config else True
self._feature_conv = []
try:
assert(self._feature_output_dimension * (self._feature_map_size)**2 //
(self._feature_map_size)**2 == self._feature_output_dimension)
except:
raise ValueError(
"Feature output dimension is not divisible by the nubmer of hexes to be clopped.")
for d in range(self._feature_cnn_n_depth):
conv_module = nn.ModuleList([])
if self._feature_cnn_use_norm:
norm = nn.InstanceNorm2d(512) #! not adaptive
conv_module.append(norm)
if self._feature_merge_type == "cat":
traj_output_channel = self._feature_output_dimension // (self._feature_map_size)**2
padding = (self._feature_filter_size-1)//2
if self._cnn_hex:
conv_layer = HexConv(self._conv_output_channel, traj_output_channel,
self._feature_filter_size, stride=1, padding=padding)
else:
conv_layer = nn.Conv2d(self._conv_output_channel, traj_output_channel, (
self._feature_filter_size, self._feature_filter_size), padding=(padding, padding))
self._conv_output_channel = traj_output_channel
elif self._feature_merge_type == "sum":
traj_output_channel = self._conv_output_channel
if self._cnn_hex:
conv_layer = HexConv(self._conv_output_channel, traj_output_channel,
self._feature_map_size, stride=1, padding=0)
else:
conv_layer = nn.Conv2d(self._conv_output_channel, traj_output_channel,
(self._feature_map_size, self._feature_map_size), padding=(0, 0))
conv_module.append(conv_layer)
if self._cnn_actv_func == "tanh":
conv_module.append(nn.Tanh())
self._feature_conv.append(conv_module)
self._feature_conv = nn.ModuleList(self._feature_conv)
if self._feature_merge_type == "cat":
self._conv_output_channel = self._feature_output_dimension
self._d_model = self._feature_output_dimension
elif self._feature_merge_type == "sum":
self._d_model = traj_output_channel
self._rotator = hex_util.Hex_Rotator()
# LSTM Layer
# 0. Pose + breakpoint embedder
# 1. Preprocessing linear layer (optional)
# 2. LSTM layer
# 2.1 Optional skip connection
self._lstm_input_merge_type = config["lstm_input_merge_type"]
self._lstm_output_merge_type = config["lstm_output_merge_type"]
self._lstm_skip = config["lstm_skip"]
if self._lstm_input_merge_type == "cat":
self._traj_break_embedder = embedder_lib.TrajBreakEmbedder(config["lstm_pb_dim"])
lstm_input_dim = self._d_model + config["lstm_pb_dim"]
lstm_output_dim = config["lstm_d_model"]
elif self._lstm_input_merge_type == "add":
self._traj_break_embedder = embedder_lib.TrajBreakEmbedder(self._d_model)
lstm_input_dim = self._d_model
lstm_output_dim = config["lstm_d_model"]
self._lstm = nn.LSTM(
input_size=lstm_input_dim,
hidden_size=lstm_output_dim,
num_layers=config["lstm_num_layers"],
bidirectional=config["lstm_bidirectional"],
dropout=config["lstm_dropout"],
batch_first=True,
)
if config["lstm_bidirectional"]:
lstm_output_dim = lstm_output_dim * 2
else:
lstm_output_dim = config["lstm_d_model"]
if self._lstm_skip:
if self._lstm_output_merge_type == "spatial-cat":
self._d_model = lstm_output_dim + self._d_model // (self._feature_map_size)**2
else:
try:
assert(self._lstm_output_merge_type != "spatial-cat")
except:
raise ValueError(
"Spaitial conceteneation option is only supported for LSTM with a skip coonection.")
self._d_model = lstm_output_dim
if torch.cuda.is_available():
self._lstm.to(util.DEVICE)
def forward(self, x, traj=None, bkpoint=None):
input = x.transpose(1, 3) # [BWHC] ==> [BCHW]
input = input.transpose(2, 3) # [BCHW] ==>[BCWH]
# input processing
input[:, 15, :, :] = torch.clamp(input[:, 15, :, :], 0, 1)
input = input.detach()
input = input.contiguous()
# embeddings layer
if self._embedder is not None:
input = self._embedder(input)
# hex CNN 1
conv_outputs: List[torch.Tensor] = list()
for i, layer in enumerate(self._conv):
conv_in = input if i == 0 else conv_outputs[-1]
x = conv_in
for l in layer:
x = l(x)
# residual connection (if k != 1)
if (i != 0 and i != self._n_depth):
x = x + conv_outputs[-1]
conv_outputs.append(x)
if len(self._conv) == 0:
final_feature = input
else:
final_feature = conv_outputs[-1]
# cropping features
if self._feature_map_size != 1:
center = (self._feature_map_size-1) // 2
# Syntax: https://discuss.pytorch.org/t/is-there-a-way-to-pad-a-tensor-instead-of-variable/10448/2
final_feature = F.pad(final_feature, (center, center, center, center))
features = []
spatial_features = []
pb_features = []
batch_idx_list = [[i for _ in range(len(t))] for i, t in enumerate(traj)]
final_feature_mask_indicies = [len(t) for t in traj]
batch_idx = []
for l in batch_idx_list:
batch_idx += l
batch_idx = torch.tensor(batch_idx).to(util.DEVICE)
coords = torch.cat(traj,0)
h_mask = coords[:, 0]
w_mask = coords[:, 1]
pose = coords[:, 2]
h_mask = h_mask.detach()
w_mask = w_mask.detach()
if self._feature_map_size == 1:
feature = final_feature[i, :, h_mask, w_mask]
feature = feature.permute(1, 0)
else:
rows = [h_mask + (slack-center) for slack in range(self._feature_map_size)]
rows = torch.stack(rows, 0).unsqueeze(1)
rows = rows.repeat(1, self._feature_map_size, 1)
rows = rows + center # need to add center bc of padding
rows = rows.detach()
cols = [w_mask + (slack-center) for slack in range(self._feature_map_size)]
cols = torch.stack(cols, 0).unsqueeze(0)
cols = cols.repeat(self._feature_map_size, 1, 1)
cols = cols + center # need to add center bc of padding
cols = cols.detach()
batch_idx = batch_idx.unsqueeze(0).unsqueeze(0)
batch_idx = batch_idx.repeat(self._feature_map_size, self._feature_map_size, 1)
feature = final_feature[batch_idx, :, rows, cols]
feature = feature.permute(2, 3, 0, 1) # TxDxHxW
# rotate features
if self._rotate_feature_map:
mask_l = len(h_mask)
# converting to offset coordinates
pose_position = torch.tensor([[center+center//2, center]
for _ in range(mask_l)]).to(util.DEVICE)
pose_rot = (pose-1) * math.radians(60)
pose_obj = pose_lib.Pose(pose_position, pose_rot)
new_feature = self._rotator.translate_and_rotate(feature, pose_obj)
feature = new_feature
# hex CNN 2
feature = feature.contiguous()
x = feature
for i, layer in enumerate(self._feature_conv):
for l in layer:
x = l(x)
spatial_feature = x.view(x.shape[0], x.shape[1], x.shape[2]*x.shape[3]) #LxDX(H*W)
feature = torch.cat([spatial_feature[:, :, i]
for i in range(spatial_feature.shape[2])], 1) # LxDX(H*W)
# attach pose features
bk_onehot = torch.zeros(pose.shape).long().to(util.DEVICE)
pose_bk_raw_features = torch.stack([pose, bk_onehot], 0)
pb_feature = self._traj_break_embedder(pose_bk_raw_features)
if self._lstm_input_merge_type == "cat":
feature = torch.cat([feature, pb_feature], 1)
elif self._lstm_input_merge_type == "add":
feature += pb_feature
spatial_features = torch.split(spatial_feature, final_feature_mask_indicies)
features = torch.split(feature, final_feature_mask_indicies)
# LSTM layer
# reference: https://discuss.pytorch.org/t/how-can-i-compute-seq2seq-loss-using-mask/861
lstm_input = pad_sequence(features, 1, padding_value=0)
unpacked = lstm_input.permute(1, 0, 2)
packed = rnn_utils.pack_padded_sequence(unpacked, final_feature_mask_indicies, enforce_sorted=False)
outputs, _ = self._lstm(packed, None)
unpacked, unpacked_len = rnn_utils.pad_packed_sequence(outputs)
final_feature = unpacked.permute(1, 0, 2)
final_feature = final_feature.contiguous()
if self._lstm_skip:
spatial_features = pad_sequence(spatial_features, 1, padding_value=0)
final_feature = final_feature.unsqueeze(-1)
final_feature = final_feature.repeat(1, 1, 1, spatial_features.shape[-1])
final_feature = torch.cat([final_feature, spatial_features], 2)
final_feature = final_feature.permute(0, 1, 3, 2)
final_feature = final_feature.contiguous().view(
(final_feature.shape[0], final_feature.shape[1]*final_feature.shape[2], final_feature.shape[3]))
final_feature = final_feature.contiguous()
# generate attention mask for feature
feature_attention_mask = torch.ones(final_feature.shape[:2]).to(util.DEVICE)
batch_size = final_feature.shape[0]
neighbor_size = spatial_features.shape[-1]
for i in range(batch_size):
feature_attention_mask[i, neighbor_size*final_feature_mask_indicies[i]:] = 0
feature_attention_mask = feature_attention_mask.bool()
return final_feature, feature_attention_mask
def get_dimension(self):
return self._d_model
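# Minimal standalone sketch of the pad/pack/unpack pattern used in forward()
# above (pad_sequence -> pack_padded_sequence -> LSTM -> pad_packed_sequence),
# with toy dimensions unrelated to the real model.
def _packed_lstm_sketch():
    feats = [torch.randn(5, 16), torch.randn(3, 16), torch.randn(2, 16)]  # variable lengths
    lengths = [f.shape[0] for f in feats]
    padded = pad_sequence(feats, batch_first=True)                        # (3, 5, 16)
    packed = rnn_utils.pack_padded_sequence(
        padded, lengths, batch_first=True, enforce_sorted=False)
    lstm = nn.LSTM(input_size=16, hidden_size=32, batch_first=True)
    out, _ = lstm(packed)
    unpacked, unpacked_len = rnn_utils.pad_packed_sequence(out, batch_first=True)
    return unpacked.shape, unpacked_len                                   # (3, 5, 32), [5, 3, 2]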
|
the-stack_0_23340 | # Copyright (c) 2013, Venkatesh and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from datetime import datetime
def execute(filters=None):
columns, data = [], []
columns = [
{
"fieldname": "sold_item",
"fieldtype": "Link",
"options": "Item",
"label": "Sold Item",
"width": 100
},
{
"fieldname": "no_of_items",
"fieldtype": "data",
"label": "No of Items",
"width": 100
},
{
"fieldname": "sold_amount",
"fieldtype": "data",
"label": "Sold Amount",
"width": 100
}
]
if filters.from_date and filters.to_date:
data = get_data(filters)
return columns, data
def get_data(filters):
to_date = datetime.strptime(filters.to_date, "%Y-%m-%d").date()
from_date = datetime.strptime(filters.from_date, "%Y-%m-%d").date()
return frappe.db.sql('''select item_name, sum(qty), sum(amount) from `tabSales Invoice Item` where creation>=%s and creation<=%s group by item_name order by sum(qty) desc''',(from_date, to_date)) |
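# Framework-free sketch of the aggregation this report performs: total quantity
# and amount per item, sorted by quantity (the sample rows are made up).
from collections import defaultdict

rows = [("Pen", 2, 20.0), ("Pen", 3, 30.0), ("Book", 1, 15.0)]
totals = defaultdict(lambda: [0, 0.0])
for item, qty, amount in rows:
    totals[item][0] += qty
    totals[item][1] += amount
report_rows = sorted(((item, q, a) for item, (q, a) in totals.items()),
                     key=lambda r: r[1], reverse=True)
# -> [('Pen', 5, 50.0), ('Book', 1, 15.0)]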
the-stack_0_23342 | import tkinter as tk
import os
# IMAGE 250x250
class Tela:
def __init__(self, master):
du = tk.Label(janela, text="By: Eduardo CJ")
du["font"] = ("Arial black", "10")
du.config(bg="#1C1C1C", foreground="red")
du.place(x=790, y=610)
cab = tk.PhotoImage(file="cab.png")
img = tk.Label(janela, image=cab)
img.cab = cab
img.config(bg="#1C1C1C")
img.place(x=0,y=0)
cabecalho = tk.PhotoImage(file="cabecalho.png")
imgg = tk.Label(janela, image=cabecalho)
imgg.cabecalho = cabecalho
imgg.config(bg="#1C1C1C")
imgg.place(x=315, y=45)
antivirus = tk.PhotoImage(file="antivirus.png")
imgg = tk.Label(janela, image=antivirus)
imgg.antivirus = antivirus
imgg.config(bg="#1C1C1C")
imgg.place(x=430, y=585)
####################################################################
# Labels with the images
illustrator = tk.PhotoImage(file="illustrator.png")
img1 = tk.Label(janela, image=illustrator)
img1.illustrator = illustrator
img1.config(bg="black")
img1.place(x=300,y=150)
img1.bind("<Button-1>", self.illustrator)
img1.bind("<Enter>", self.illustrator2)
img1.bind("<Leave>", self.illustrator3)
photoshop = tk.PhotoImage(file="photoshop.png")
img2 = tk.Label(janela, image=photoshop)
img2.photoshop = photoshop
img2.config(bg="black")
img2.place(x=500,y=150)
img2.bind("<Button-1>", self.photoshop)
img2.bind("<Enter>", self.photoshop2)
img2.bind("<Leave>", self.photoshop3)
premiere = tk.PhotoImage(file="premiere.png")
img3 = tk.Label(janela, image=premiere)
img3.premiere = premiere
img3.config(bg="black")
img3.place(x=700,y=150)
img3.bind("<Button-1>", self.premiere)
img3.bind("<Enter>", self.premiere2)
img3.bind("<Leave>", self.premiere3)
animate = tk.PhotoImage(file="animate.png")
img4 = tk.Label(janela, image=animate)
img4.animate = animate
img4.config(bg="black")
img4.place(x=300,y=350)
img4.bind("<Button-1>", self.animate)
img4.bind("<Enter>", self.animate2)
img4.bind("<Leave>", self.animate3)
muse = tk.PhotoImage(file="muse.png")
img5 = tk.Label(janela, image=muse)
img5.muse = muse
img5.config(bg="black")
img5.place(x=500,y=350)
img5.bind("<Button-1>", self.muse)
img5.bind("<Enter>", self.muse2)
img5.bind("<Leave>", self.muse3)
xd = tk.PhotoImage(file="xd.png")
img6 = tk.Label(janela, image=xd)
img6.muse = xd
img6.config(bg="black")
img6.place(x=700,y=350)
img6.bind("<Button-1>", self.xd)
img6.bind("<Enter>", self.xd2)
img6.bind("<Leave>", self.xd3)
# Events
def illustrator(self, event):
os.popen("explorer https://download1493.mediafire.com/gop4pvt196bg/2pt1y37e07mnjyw/illustrator+cc+2020+-+Player+Noob-20211006T015721Z-001.zip")
def illustrator2(self, event):
self.lb = tk.Label(text="Adobe Illustrator")
self.lb["font"] = ("Arial","17")
self.lb.config(bg="#8A4B08", foreground="black")
self.lb.place(x=293,y=120)
def illustrator3(self, event):
self.lb.destroy()
def photoshop(self, event):
os.popen("explorer https://download1655.mediafire.com/dk6gut10n4ig/stdi3fn7sk7kz79/Adobe+Photoshop+CC+2020-20211006T013819Z-001.zip")
def photoshop2(self, event):
self.lb1 = tk.Label(text="Adobe Photoshop")
self.lb1["font"] = ("Arial","17")
self.lb1.config(bg="#2E9AFE", foreground="black")
self.lb1.place(x=487,y=120)
def photoshop3(self, event):
self.lb1.destroy()
def premiere(self, event):
os.popen("explorer https://download1498.mediafire.com/2ebxx7ppa7bg/39iqqokqik2yfcn/Adobe+Premiere+Pro+CC+2020.rar")
def premiere2(self, event):
self.lb1 = tk.Label(text="Adobe Premiere")
self.lb1["font"] = ("Arial","17")
self.lb1.config(bg="#B404AE", foreground="black")
self.lb1.place(x=690,y=120)
def premiere3(self, event):
self.lb1.destroy()
def animate(self, event):
os.popen("explorer https://download1323.mediafire.com/26dbjukq6fmg/vhyl0svu3rnoscd/Adobe+Animate+2021+-+Flavorzinho-20211006T020846Z-001.zip")
def animate2(self, event):
self.lb1 = tk.Label(text="Adobe Animate")
self.lb1["font"] = ("Arial","17")
self.lb1.config(bg="#DF3A01", foreground="black")
self.lb1.place(x=300,y=320)
def animate3(self, event):
self.lb1.destroy()
def muse(self, event):
os.popen("explorer https://download944.mediafire.com/ulcf03voubig/fwclqwap2hqs1em/Adobe+Muse+CC.rar")
def muse2(self, event):
self.lb1 = tk.Label(text="Adobe Muse")
self.lb1["font"] = ("Arial","17")
self.lb1.config(bg="#AEB404", foreground="black")
self.lb1.place(x=510,y=320)
def muse3(self, event):
self.lb1.destroy()
def xd(self, event):
os.popen("explorer https://download1655.mediafire.com/g2c4ykpqdxgg/uklgqv7f21y558a/Adobe+XD+34.3.12.rar")
def xd2(self, event):
self.lb1 = tk.Label(text="Adobe XD")
self.lb1["font"] = ("Arial","17")
self.lb1.config(bg="#8A0886", foreground="black")
self.lb1.place(x=720,y=320)
def xd3(self, event):
self.lb1.destroy()
janela = tk.Tk()
Tela(janela)
janela.title("Pacote Adobe")
janela.geometry("930x650+130+20")
janela.resizable(width=False, height=False)
janela.config(cursor="hand2", bg="#1C1C1C")
janela.iconbitmap("adobe.ico")
janela.mainloop()
|
the-stack_0_23343 | from api_tests import AnarchoTestCase
from anarcho.models.user import User
from anarcho.models.user_app import UserApp
test_team_user_email = '[email protected]'
test_team_user_name = 'test_name2'
class TeamTest(AnarchoTestCase):
def setUp(self):
AnarchoTestCase.setUp(self)
self.register()
self.login()
self.register(email=test_team_user_email, name=test_team_user_name)
self.create_app()
self.app_key = self.created_app.app_key
self.add_to_team(email=test_team_user_email, app_key=self.app_key, permission='r')
def get_user_app(self):
"""
:rtype: UserApp
"""
user = User.query.filter_by(email=test_team_user_email).first()
if user:
return UserApp.query.filter_by(user_id=user.id).first()
def test_permissions_update(self):
r = self.update_permission(email=test_team_user_email, app_key=self.app_key, permission='w')
self.assert_status_code(r)
user_app = self.get_user_app()
self.assertIsNotNone(user_app, msg='UserApp for {0} not found'.format('[email protected]'))
self.assertTrue(user_app.permission == 'w', msg='Wrong permission after update')
def test_can_not_found_app(self):
r = self.update_permission()
self.assert_status_code(r, 404)
self.assert_error_message(r, 'app_not_found')
def test_permissions_remove(self):
r = self.remove_permission(email=test_team_user_email, app_key=self.app_key)
self.assert_status_code(r)
user_app = self.get_user_app()
self.assertIsNone(user_app, msg='UserApp for {0} not deleted'.format('[email protected]'))
def test_user_can_not_remove_his_permissions(self):
r = self.remove_permission(email=self.test_user_email, app_key=self.app_key)
self.assert_status_code(r, 403)
def test_user_can_not_update_his_permissions(self):
r = self.remove_permission(email=self.test_user_email, app_key=self.app_key)
self.assert_status_code(r, 403)
def test_user_can_not_add_to_app_existing_user(self):
r = self.add_to_team(email=test_team_user_email, app_key=self.app_key, permission='r')
self.assert_status_code(r, 409)
self.assert_error_message(r, 'user_with_current_email_already_exist')
def test_email_format_validation(self):
r = self.add_to_team(email='test3mail.com', app_key=self.app_key, permission='r')
self.assert_status_code(r, 403)
def test_empty_email_validation(self):
r = self.add_to_team(email=' ', app_key=self.app_key, permission='r')
self.assert_status_code(r, 403)
def test_email_length_validation(self):
r = self.add_to_team(email='[email protected]', app_key=self.app_key, permission='r')
self.assert_status_code(r, 403)
def test_add_existing_user_to_team(self):
self.register('[email protected]', 'test_name3')
self.create_app(app_name='test_app2')
self.login()
r = self.add_to_team(email='[email protected]', app_key=self.app_key, permission='r')
self.assert_status_code(r)
def test_add_user_with_insensitive_email_to_team(self):
email = '[email protected]'
self.register(email=email, name='test_name3')
self.create_app(app_name='test_app2')
self.login()
r = self.add_to_team(email=email.lower(), app_key=self.app_key, permission='r')
self.assert_status_code(r)
def test_register_user_after_adding_to_team(self):
email = '[email protected]'
self.login()
r = self.add_to_team(email=email, app_key=self.app_key, permission='r')
self.assert_status_code(r)
r = self.register(email=email, name='test_name4')
self.assert_status_code(r)
r = self.login()
self.assert_status_code(r) |
the-stack_0_23345 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 6 16:01:52 2021
@author: Lukas
"""
import numpy as np
import tensorflow as tf
import strawberryfields as sf
from strawberryfields import ops
import basis
import time
from strawberryfields.utils import random_interferometer
import matplotlib.pyplot as plt
tf.random.set_seed(2021)
np.random.seed(2021)
# Dimension at which the Fock space is truncated (for the simulation)
cutoff_dim = 10
#==============================================================
# Training data
#==============================================================
# Penalty for undesired properties of the solution
reg=1
# Learning rate
lr = 0.025
# Number of epochs
epochs=200
# Dimension of the unitary to be learned
dim_oper = 4
# create a random 4x4 matrix
unit_r = random_interferometer(dim_oper)
print(unit_r)
# pad the operator up to the cutoff with the identity
unit_z = np.identity(cutoff_dim, dtype=np.complex128)
unit_z[:dim_oper, :dim_oper] = unit_r
# columns of the matrix
zielkets = []
for i in range(dim_oper):
zielkets.append(unit_z[:,i])
zielkets = tf.constant(zielkets, dtype=tf.complex64)
#==============================================================
# Network parameters
#==============================================================
# Size of the network
in_dim = 1
layers = 15
#==============================================================
eng = sf.Engine('tf', backend_options={"cutoff_dim": cutoff_dim, "batch_size": dim_oper})
#==============================================================
# Initialization
#==============================================================
# Create a program with N qumodes
qnn = sf.Program(in_dim)
# initialize parameters randomly
weights = basis.init(in_dim, layers)
anzahl = np.prod(weights.shape) # total number of parameters
# Create an array of symbolic variables to be used in the QNN
params = np.arange(anzahl).reshape(weights.shape)
params = params.astype(np.str) # variables are simply numbered
par = []
for i in params:
par.append(qnn.params(*i))
params = np.array(par)
# symbolic parameter for the input
x_data = qnn.params("input")
# create basis vectors with the correct dimension
basis_vektoren = np.zeros([dim_oper,cutoff_dim])
np.fill_diagonal(basis_vektoren,1)
#==============================================================
# Build up the structure of the network
with qnn.context as q:
# initializes the basis vectors
ops.Ket(basis_vektoren) | q
# builds the layers of the QNN
for l in range(layers):
basis.layer(params[l], q)
#==============================================================
# Cost function
#==============================================================
def costfunc(weights):
# To use TensorFlow, a dictionary mapping the symbolic
# variables to the TensorFlow variables has to be created
dictio = {}
for symb, var in zip(params.flatten(), tf.reshape(weights, -1)):
dictio[symb.name] = var
# use the TensorFlow simulator
state = eng.run(qnn, args=dictio).state
# output ket
ket = state.ket()
# mean overlap
ueberlapp = tf.math.real( tf.einsum('bi,bi->b', tf.math.conj(zielkets),ket) )
loss = tf.abs(tf.reduce_sum(ueberlapp - 1))
# Make sure that the trace of the output stays close to 1.
# In other words, penalize the circuit for using operations
# that cause large numerical errors (i.e. increase the population of higher Fock states)
trace = tf.abs(tf.reduce_mean(state.trace()))
cost = loss + reg * (tf.abs(trace - 1) ** 2)
return cost, loss, trace, ket
#==============================================================
# Training
#==============================================================
history = []
start_time = time.time()
# Use a TensorFlow optimizer, more precisely Adam (arXiv:1412.6980v9)
opt= tf.keras.optimizers.Adam(learning_rate=lr)
# Run the training 200 times
for i in range(epochs):
# if the program has already been run, reset the engine
if eng.run_progs:
eng.reset()
with tf.GradientTape() as tape:
cost, loss, trace, ket = costfunc(weights)
gradients = tape.gradient(cost, weights)
opt.apply_gradients(zip([gradients], [weights]))
history.append(loss)
# every 10 steps
if i % 10 == 0:
print("Epochen: {} Gesamtkosten: {:.4f} Loss: {:.4f} Trace: {:.4f}".format(i, cost, loss, trace))
end_time = time.time()
print("Dauer: ",np.round(end_time-start_time),"Sekunden")
np.save("weights_unitary",weights)
eng.reset()
# %matplotlib inline
plt.plot(history)
plt.ylabel('Kosten')
plt.xlabel('Epochen')
plt.show()
# Test the QNN by comparing the learned operator with
# the actual operator
#==============================================================
# Test
#==============================================================
# load weights
weights=np.load("weights_unitary.npy")
dictio = {}
for symb, var in zip(params.flatten(), tf.reshape(weights, -1)):
dictio[symb.name] = var
# use the TensorFlow simulator
state = eng.run(qnn, args=dictio).state
# output ket
ket = state.ket()
# Extract the relevant part of the operator from the output
learnt_unitary = ket.numpy().T[:dim_oper, :dim_oper]
# Plot the two operators graphically;
# real and imaginary parts are treated separately
fig, ax = plt.subplots(1, 4, figsize=(7, 4))
ax[0].matshow(unit_r.real, cmap=plt.get_cmap('Blues'))
ax[1].matshow(unit_r.imag, cmap=plt.get_cmap('Reds'))
ax[2].matshow(learnt_unitary.real, cmap=plt.get_cmap('Blues'))
ax[3].matshow(learnt_unitary.imag, cmap=plt.get_cmap('Reds'))
ax[0].set_xlabel(r'$\mathrm{Re}(U_{Ziel})$')
ax[1].set_xlabel(r'$\mathrm{Im}(U_{Ziel})$')
ax[2].set_xlabel(r'$\mathrm{Re}(U_{gelernt})$')
ax[3].set_xlabel(r'$\mathrm{Im}(U_{gelernt})$')
fig.show() |
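# Optional sanity check (sketch): if training succeeded, the learned 4x4 block
# should be close to unitary and close to the target up to numerical error.
def check_learnt_unitary(learnt, target, atol=1e-1):
    identity = np.eye(learnt.shape[0])
    print("approximately unitary:", np.allclose(learnt @ learnt.conj().T, identity, atol=atol))
    print("max |U_learnt - U_target|:", np.abs(learnt - target).max())
# check_learnt_unitary(learnt_unitary, unit_r)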
the-stack_0_23346 | '''
This module holds filters that can be used in postprocessing a form field.
@author: Gerson Galang
'''
from django import template
from lxml.html.clean import Cleaner
register = template.Library()
@register.filter
def size(value, actualSize):
"""Add the size attribute to the text field."""
value.field.widget.attrs['size'] = actualSize
return value
@register.filter
def parametername_form(value):
"Removes all values of arg from the given string"
return value.replace('/', '_s47_')
@register.filter
def sanitize_html(html, bad_tags=['body']):
"""Removes identified malicious HTML content from the given string."""
if html is None or html == '':
return html
cleaner = Cleaner(style=False, page_structure=True, remove_tags=bad_tags,
safe_attrs_only=False)
return cleaner.clean_html(html)
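# Small usage sketch (hypothetical input). In a template this filter is applied
# as {{ some_html|sanitize_html }} after loading this tag library; it can also
# be called directly from Python, as below.
def _sanitize_html_example():
    dirty = "<body onload='steal()'><p>Hello <b>world</b></p></body>"
    # 'body' is in the default bad_tags list, so the wrapper tag is dropped
    # while the inner markup is kept; script-related attributes are cleaned.
    return sanitize_html(dirty)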
|
the-stack_0_23347 | """The setup script."""
from setuptools import find_packages, setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
name="aiopyarr",
version="master",
author="Robert Hillis",
author_email="[email protected]",
description="An Asynchronous Lidarr, Radarr, Readarr, Sonarr APIs for Python.",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/tkdrob/aiopyarr",
package_data={"aiopyarr": ["py.typed"]},
packages=find_packages(include=["aiopyarr", "aiopyarr*"]),
install_requires=["aiohttp>=3.6.1,<4.0"],
keywords=["aiopyarr", "radarr", "sonarr", "plex"],
license="MIT license",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires=">=3.9",
)
|
the-stack_0_23350 | from typing import Iterable
from .enums import CardSet, CardType, GameTag, State, Step, Zone
PLAYABLE_CARD_TYPES = (
CardType.HERO, CardType.MINION, CardType.SPELL, CardType.WEAPON
)
INITIAL_HERO_SETS = (CardSet.CORE, CardSet.HERO_SKINS)
class Entity:
_args: Iterable[str] = ()
def __init__(self, id):
self.id = id
self.game = None
self.tags = {}
self.initial_creator = 0
self.initial_zone = Zone.INVALID
self._initial_controller = 0
def __repr__(self):
return "%s(id=%r, %s)" % (
self.__class__.__name__, self.id,
", ".join("%s=%r" % (k, getattr(self, k)) for k in self._args)
)
@property
def controller(self):
return self.game.get_player(self.tags.get(GameTag.CONTROLLER, 0))
@property
def initial_controller(self):
return self.game.get_player(
self._initial_controller or self.tags.get(GameTag.CONTROLLER, 0)
)
@property
def type(self):
return self.tags.get(GameTag.CARDTYPE, CardType.INVALID)
@property
def zone(self):
return self.tags.get(GameTag.ZONE, Zone.INVALID)
def _update_tags(self, tags):
for tag, value in tags.items():
if tag == GameTag.CONTROLLER and not self._initial_controller:
self._initial_controller = self.tags.get(GameTag.CONTROLLER, value)
self.tags.update(tags)
def reset(self):
pass
def tag_change(self, tag, value):
self._update_tags({tag: value})
class Game(Entity):
_args = ("players", )
can_be_in_deck = False
def __init__(self, id):
super(Game, self).__init__(id)
self.players = []
self._entities = {}
self.initial_entities = []
self.initial_state = State.INVALID
self.initial_step = Step.INVALID
@property
def entities(self):
yield from self._entities.values()
@property
def current_player(self):
for player in self.players:
if player.tags.get(GameTag.CURRENT_PLAYER):
return player
@property
def first_player(self):
for player in self.players:
if player.tags.get(GameTag.FIRST_PLAYER):
return player
@property
def setup_done(self):
return self.tags.get(GameTag.NEXT_STEP, 0) > Step.BEGIN_MULLIGAN
def get_player(self, value):
for player in self.players:
if value in (player.player_id, player.name):
return player
def in_zone(self, zone):
for entity in self.entities:
if entity.zone == zone:
yield entity
def create(self, tags):
self.tags = dict(tags)
self.initial_state = self.tags.get(GameTag.STATE, State.INVALID)
self.initial_step = self.tags.get(GameTag.STEP, Step.INVALID)
self.register_entity(self)
def register_entity(self, entity):
entity.game = self
self._entities[entity.id] = entity
entity.initial_zone = entity.zone
if isinstance(entity, Player):
self.players.append(entity)
elif not self.setup_done:
self.initial_entities.append(entity)
def reset(self):
for entity in self.entities:
if entity is self:
continue
entity.reset()
def find_entity_by_id(self, id: int):
# int() for LazyPlayer mainly...
id = int(id)
return self._entities.get(id)
class Player(Entity):
_args = ("name", )
UNKNOWN_HUMAN_PLAYER = "UNKNOWN HUMAN PLAYER"
can_be_in_deck = False
def __init__(self, id, player_id, hi, lo, name=None):
super(Player, self).__init__(id)
self.player_id = player_id
self.account_hi = hi
self.account_lo = lo
self.name = name
self.initial_hero_entity_id = 0
def __str__(self):
return self.name or ""
@property
def names(self):
"""
Returns the player's name and real name.
Returns two empty strings if the player is unknown.
AI real name is always an empty string.
"""
if self.name == self.UNKNOWN_HUMAN_PLAYER:
return "", ""
if not self.is_ai and " " in self.name:
return "", self.name
return self.name, ""
@property
def initial_deck(self):
for entity in self.game.initial_entities:
# Exclude entities that aren't initially owned by the player
if entity.initial_controller != self:
continue
# Exclude entities that aren't initially in the deck
if entity.initial_zone != Zone.DECK:
continue
# Exclude entity types that cannot be in the deck
if not entity.can_be_in_deck:
continue
# Allow CREATOR=1 because of monster hunt decks.
# Everything else is likely a false positive.
if entity.initial_creator > 1:
continue
yield entity
@property
def entities(self):
for entity in self.game.entities:
if entity.controller == self:
yield entity
@property
def hero(self):
entity_id = self.tags.get(GameTag.HERO_ENTITY, 0)
if entity_id:
return self.game.find_entity_by_id(entity_id)
else:
# Fallback that should never trigger
for entity in self.in_zone(Zone.PLAY):
if entity.type == CardType.HERO:
return entity
@property
def heroes(self):
for entity in self.entities:
if entity.type == CardType.HERO:
yield entity
@property
def starting_hero(self):
if self.initial_hero_entity_id:
return self.game.find_entity_by_id(self.initial_hero_entity_id)
# Fallback
heroes = list(self.heroes)
if not heroes:
return
return heroes[0]
@property
def is_ai(self):
return self.account_lo == 0
def in_zone(self, zone):
for entity in self.entities:
if entity.zone == zone:
yield entity
class Card(Entity):
_args = ("card_id", )
def __init__(self, id, card_id):
super(Card, self).__init__(id)
self.is_original_entity = True
self.initial_card_id = card_id
self.card_id = card_id
self.revealed = False
@property
def base_tags(self) -> dict:
if not self.card_id:
return {}
from .cardxml import load
db, _ = load()
return db[self.card_id].tags
@property
def can_be_in_deck(self) -> bool:
card_type = self.type
if not card_type:
# If we don't know the card type, assume yes
return True
elif card_type == CardType.HERO:
tags = self.base_tags
return (
tags.get(GameTag.CARD_SET, 0) not in INITIAL_HERO_SETS and
tags.get(GameTag.COLLECTIBLE, 0)
)
return card_type in PLAYABLE_CARD_TYPES
def _capture_card_id(self, card_id, tags):
if self.initial_card_id:
return
transformed_from_card = tags.get(GameTag.TRANSFORMED_FROM_CARD, 0)
if transformed_from_card:
from .cardxml import load_dbf
db, _ = load_dbf()
card = db.get(transformed_from_card)
if card:
self.initial_card_id = card.card_id
return
self.initial_card_id = card_id
def _update_tags(self, tags):
super()._update_tags(tags)
if self.is_original_entity and self.initial_creator is None:
creator = tags.get(GameTag.CREATOR, 0)
if creator:
self.initial_creator = creator
def reveal(self, card_id, tags):
self.revealed = True
self.card_id = card_id
self._capture_card_id(card_id, tags)
if tags.get(GameTag.TRANSFORMED_FROM_CARD, 0):
self.is_original_entity = False
self._update_tags(tags)
def hide(self):
self.revealed = False
def change(self, card_id, tags):
self.is_original_entity = False
self._capture_card_id(card_id, tags)
self.card_id = card_id
self._update_tags(tags)
def reset(self):
self.card_id = None
self.revealed = False
|
the-stack_0_23351 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models for doing PSF/PRF fitting photometry on image data.
"""
import copy
import itertools
import warnings
import numpy as np
from astropy.nddata import NDData
from astropy.modeling import Parameter, Fittable2DModel
from astropy.utils.exceptions import AstropyWarning
__all__ = ['NonNormalizable', 'FittableImageModel', 'EPSFModel',
'GriddedPSFModel', 'IntegratedGaussianPRF', 'PRFAdapter']
class NonNormalizable(AstropyWarning):
"""
Used to indicate that a :py:class:`FittableImageModel` model is
non-normalizable.
"""
pass
class FittableImageModel(Fittable2DModel):
"""
A fittable 2D model of an image allowing for image intensity scaling
and image translations.
This class takes 2D image data and computes the
values of the model at arbitrary locations (including at intra-pixel,
fractional positions) within this image using spline interpolation
provided by :py:class:`~scipy.interpolate.RectBivariateSpline`.
The fittable model provided by this class has three model parameters:
an image intensity scaling factor (`flux`) which is applied to
(normalized) image, and two positional parameters (`x_0` and `y_0`)
indicating the location of a feature in the coordinate grid on which
the model is to be evaluated.
If this class is initialized with `flux` (intensity scaling factor)
set to `None`, then `flux` is going to be estimated as ``sum(data)``.
Parameters
----------
data : numpy.ndarray
Array containing 2D image.
origin : tuple, None, optional
A reference point in the input image ``data`` array. When origin is
`None`, origin will be set at the middle of the image array.
If `origin` represents the location of a feature (e.g., the position
of an intensity peak) in the input ``data``, then model parameters
`x_0` and `y_0` show the location of this peak in an another target
image to which this model was fitted. Fundamentally, it is the
coordinate in the model's image data that should map to
coordinate (`x_0`, `y_0`) of the output coordinate system on which the
model is evaluated.
Alternatively, when `origin` is set to ``(0,0)``, then model parameters
`x_0` and `y_0` are shifts by which model's image should be translated
in order to match a target image.
normalize : bool, optional
        Indicates whether or not the model should be built on normalized
input image data. If true, then the normalization constant (*N*) is
computed so that
.. math::
N \\cdot C \\cdot \\sum\\limits_{i,j} D_{i,j} = 1,
where *N* is the normalization constant, *C* is correction factor
given by the parameter ``normalization_correction``, and
:math:`D_{i,j}` are the elements of the input image ``data`` array.
normalization_correction : float, optional
A strictly positive number that represents correction that needs to
be applied to model's data normalization (see *C* in the equation
in the comments to ``normalize`` for more details).
A possible application for this parameter is to account for aperture
correction. Assuming model's data represent a PSF to be fitted to
some target star, we set ``normalization_correction`` to the aperture
correction that needs to be applied to the model. That is,
``normalization_correction`` in this case should be set to the
ratio between the total flux of the PSF (including flux outside model's
data) to the flux of model's data.
Then, best fitted value of the `flux` model
parameter will represent an aperture-corrected flux of the target star.
fill_value : float, optional
The value to be returned by the `evaluate` or
``astropy.modeling.Model.__call__`` methods
when evaluation is performed outside the definition domain of the
model.
ikwargs : dict, optional
Additional optional keyword arguments to be passed directly to the
`compute_interpolator` method. See `compute_interpolator` for more
details.
oversampling : float or tuple of two floats, optional
The oversampling factor(s) of the model in the ``x`` and ``y`` directions.
If ``oversampling`` is a scalar it will be treated as being the same in both
x and y; otherwise a tuple of two floats will be treated as
``(x_oversamp, y_oversamp)``.
"""
flux = Parameter(description='Intensity scaling factor for image data.',
default=1.0)
x_0 = Parameter(description='X-position of a feature in the image in '
'the output coordinate grid on which the model is '
'evaluated.', default=0.0)
y_0 = Parameter(description='Y-position of a feature in the image in '
'the output coordinate grid on which the model is '
'evaluated.', default=0.0)
def __init__(self, data, flux=flux.default,
x_0=x_0.default, y_0=y_0.default,
normalize=False, normalization_correction=1.0,
origin=None, oversampling=1, fill_value=0.0, ikwargs={}):
self._fill_value = fill_value
self._img_norm = None
self._normalization_status = 0 if normalize else 2
self._store_interpolator_kwargs(ikwargs)
self._set_oversampling(oversampling)
if normalization_correction <= 0:
raise ValueError("'normalization_correction' must be strictly "
"positive.")
self._normalization_correction = normalization_correction
self._data = np.array(data, copy=True, dtype=np.float64)
if not np.all(np.isfinite(self._data)):
raise ValueError("All elements of input 'data' must be finite.")
# set input image related parameters:
self._ny, self._nx = self._data.shape
self._shape = self._data.shape
if self._data.size < 1:
raise ValueError("Image data array cannot be zero-sized.")
# set the origin of the coordinate system in image's pixel grid:
self.origin = origin
if flux is None:
if self._img_norm is None:
self._img_norm = self._compute_raw_image_norm(self._data)
flux = self._img_norm
self._compute_normalization(normalize)
super().__init__(flux, x_0, y_0)
# initialize interpolator:
self.compute_interpolator(ikwargs)
def _compute_raw_image_norm(self, data):
"""
Helper function that computes the uncorrected inverse normalization
factor of input image data. This quantity is computed as the
*sum of all pixel values*.
.. note::
            This function is intended to be overridden in a subclass if one
desires to change the way the normalization factor is computed.
"""
return np.sum(self._data, dtype=np.float64)
def _compute_normalization(self, normalize):
"""
Helper function that computes (corrected) normalization factor
of the original image data. This quantity is computed as the
inverse "raw image norm" (or total "flux" of model's image)
corrected by the ``normalization_correction``:
.. math::
N = 1/(\\Phi * C),
where :math:`\\Phi` is the "total flux" of model's image as
computed by `_compute_raw_image_norm` and *C* is the
normalization correction factor. :math:`\\Phi` is computed only
once if it has not been previously computed. Otherwise, the
existing (stored) value of :math:`\\Phi` is not modified as
:py:class:`FittableImageModel` does not allow image data to be
modified after the object is created.
.. note::
Normally, this function should not be called by the
            end-user. It is intended to be overridden in a subclass if
one desires to change the way the normalization factor is
computed.
"""
self._normalization_constant = 1.0 / self._normalization_correction
if normalize:
# compute normalization constant so that
# N*C*sum(data) = 1:
if self._img_norm is None:
self._img_norm = self._compute_raw_image_norm(self._data)
if self._img_norm != 0.0 and np.isfinite(self._img_norm):
self._normalization_constant /= self._img_norm
self._normalization_status = 0
else:
self._normalization_constant = 1.0
self._normalization_status = 1
warnings.warn("Overflow encountered while computing "
"normalization constant. Normalization "
"constant will be set to 1.", NonNormalizable)
else:
self._normalization_status = 2
@property
def oversampling(self):
"""
The factor by which the stored image is oversampled. I.e., an input
        to this model is multiplied by this factor to yield the index into the
stored image.
"""
return self._oversampling
def _set_oversampling(self, value):
"""
        This is a private method: it is used by the initializer to validate
        and store the ``oversampling`` factors.
"""
try:
value = np.atleast_1d(value).astype(float)
if len(value) == 1:
value = np.repeat(value, 2)
except ValueError:
raise ValueError('Oversampling factors must be float')
if np.any(value <= 0):
raise ValueError('Oversampling factors must be greater than 0')
self._oversampling = value
@property
def data(self):
""" Get original image data. """
return self._data
@property
def normalized_data(self):
""" Get normalized and/or intensity-corrected image data. """
return (self._normalization_constant * self._data)
@property
def normalization_constant(self):
""" Get normalization constant. """
return self._normalization_constant
@property
def normalization_status(self):
"""
Get normalization status. Possible status values are:
        - 0: **Performed**. Model has been successfully normalized at
user's request.
- 1: **Failed**. Attempt to normalize has failed.
- 2: **NotRequested**. User did not request model to be normalized.
"""
return self._normalization_status
@property
def normalization_correction(self):
"""
Set/Get flux correction factor.
.. note::
When setting correction factor, model's flux will be adjusted
accordingly such that if this model was a good fit to some target
image before, then it will remain a good fit after correction
factor change.
"""
return self._normalization_correction
@normalization_correction.setter
def normalization_correction(self, normalization_correction):
old_cf = self._normalization_correction
self._normalization_correction = normalization_correction
self._compute_normalization(normalize=self._normalization_status != 2)
# adjust model's flux so that if this model was a good fit to some
# target image, then it will remain a good fit after correction factor
# change:
self.flux *= normalization_correction / old_cf
@property
def shape(self):
"""A tuple of dimensions of the data array in numpy style (ny, nx)."""
return self._shape
@property
def nx(self):
"""Number of columns in the data array."""
return self._nx
@property
def ny(self):
"""Number of rows in the data array."""
return self._ny
@property
def origin(self):
"""
A tuple of ``x`` and ``y`` coordinates of the origin of the coordinate
system in terms of pixels of model's image.
When setting the coordinate system origin, a tuple of two `int` or
`float` may be used. If origin is set to `None`, the origin of the
coordinate system will be set to the middle of the data array
(``(npix-1)/2.0``).
.. warning::
Modifying `origin` will not adjust (modify) model's parameters
`x_0` and `y_0`.
"""
return (self._x_origin, self._y_origin)
@origin.setter
def origin(self, origin):
if origin is None:
self._x_origin = (self._nx - 1) / 2.0
self._y_origin = (self._ny - 1) / 2.0
elif hasattr(origin, '__iter__') and len(origin) == 2:
self._x_origin, self._y_origin = origin
else:
raise TypeError("Parameter 'origin' must be either None or an "
"iterable with two elements.")
@property
def x_origin(self):
"""X-coordinate of the origin of the coordinate system."""
return self._x_origin
@property
def y_origin(self):
"""Y-coordinate of the origin of the coordinate system."""
return self._y_origin
@property
def fill_value(self):
"""Fill value to be returned for coordinates outside of the domain of
definition of the interpolator. If ``fill_value`` is `None`, then
values outside of the domain of definition are the ones returned
by the interpolator.
"""
return self._fill_value
@fill_value.setter
def fill_value(self, fill_value):
self._fill_value = fill_value
def _store_interpolator_kwargs(self, ikwargs):
"""
This function should be called in a subclass whenever model's
interpolator is (re-)computed.
"""
self._interpolator_kwargs = copy.deepcopy(ikwargs)
@property
def interpolator_kwargs(self):
"""
Get current interpolator's arguments used when interpolator was
created.
"""
return self._interpolator_kwargs
def compute_interpolator(self, ikwargs={}):
"""
        Compute/define the interpolating spline. This function can be overridden
in a subclass to define custom interpolators.
Parameters
----------
ikwargs : dict, optional
Additional optional keyword arguments. Possible values are:
- **degree** : int, tuple, optional
Degree of the interpolating spline. A tuple can be used to
provide different degrees for the X- and Y-axes.
Default value is degree=3.
- **s** : float, optional
Non-negative smoothing factor. Default value s=0 corresponds to
interpolation.
See :py:class:`~scipy.interpolate.RectBivariateSpline` for more
details.
Notes
-----
* When subclassing :py:class:`FittableImageModel` for the
purpose of overriding :py:func:`compute_interpolator`,
          the :py:func:`evaluate` may need to be overridden as well depending
on the behavior of the new interpolator. In addition, for
improved future compatibility, make sure
that the overriding method stores keyword arguments ``ikwargs``
by calling ``_store_interpolator_kwargs`` method.
* Use caution when modifying interpolator's degree or smoothness in
a computationally intensive part of the code as it may decrease
code performance due to the need to recompute interpolator.
"""
from scipy.interpolate import RectBivariateSpline
if 'degree' in ikwargs:
degree = ikwargs['degree']
if hasattr(degree, '__iter__') and len(degree) == 2:
degx = int(degree[0])
degy = int(degree[1])
else:
degx = int(degree)
degy = int(degree)
if degx < 0 or degy < 0:
raise ValueError("Interpolator degree must be a non-negative "
"integer")
else:
degx = 3
degy = 3
if 's' in ikwargs:
smoothness = ikwargs['s']
else:
smoothness = 0
        x = np.arange(self._nx, dtype=float)
        y = np.arange(self._ny, dtype=float)
self.interpolator = RectBivariateSpline(
x, y, self._data.T, kx=degx, ky=degy, s=smoothness
)
self._store_interpolator_kwargs(ikwargs)
def evaluate(self, x, y, flux, x_0, y_0, use_oversampling=True):
"""
Evaluate the model on some input variables and provided model
parameters.
Parameters
----------
use_oversampling : bool, optional
Whether to use the oversampling factor to calculate the
model pixel indices. The default is `True`, which means the
            input indices will be multiplied by this factor.
"""
if use_oversampling:
xi = self._oversampling[0] * (np.asarray(x) - x_0)
yi = self._oversampling[1] * (np.asarray(y) - y_0)
else:
xi = np.asarray(x) - x_0
yi = np.asarray(y) - y_0
xi += self._x_origin
yi += self._y_origin
f = flux * self._normalization_constant
evaluated_model = f * self.interpolator.ev(xi, yi)
if self._fill_value is not None:
# find indices of pixels that are outside the input pixel grid and
# set these pixels to the 'fill_value':
invalid = (((xi < 0) | (xi > self._nx - 1)) |
((yi < 0) | (yi > self._ny - 1)))
evaluated_model[invalid] = self._fill_value
return evaluated_model
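def _fittable_image_model_example():
    # Hedged usage sketch (not part of the original module): build a
    # FittableImageModel from a small synthetic Gaussian-like image and
    # evaluate it at a shifted position. The image values and grids below are
    # made-up assumptions used only for illustration.
    yy, xx = np.mgrid[0:21, 0:21]
    image = np.exp(-((xx - 10.0) ** 2 + (yy - 10.0) ** 2) / (2 * 2.5 ** 2))
    model = FittableImageModel(image, flux=None, normalize=True,
                               x_0=32.0, y_0=18.5)
    # Evaluate on a target grid around (x_0, y_0); points that fall outside
    # the reference image are assigned ``fill_value``.
    yt, xt = np.mgrid[10:27, 24:41].astype(float)
    return model(xt, yt)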
class EPSFModel(FittableImageModel):
"""
A subclass of `FittableImageModel`. A fittable ePSF model.
"""
def __init__(self, data, flux=1.0, x_0=0, y_0=0, normalize=True,
normalization_correction=1.0, origin=None, oversampling=1.,
fill_value=0., ikwargs={}):
super().__init__(
data=data, flux=flux, x_0=x_0, y_0=y_0, normalize=normalize,
normalization_correction=normalization_correction, origin=origin,
oversampling=oversampling, fill_value=fill_value, ikwargs=ikwargs)
class GriddedPSFModel(Fittable2DModel):
"""
A fittable 2D model containing a grid PSF models defined at specific
locations that are interpolated to evaluate a PSF at an arbitrary
(x, y) position.
Parameters
----------
data : `~astropy.nddata.NDData`
An `~astropy.nddata.NDData` object containing the grid of
reference PSF arrays. The data attribute must contain a 3D
`~numpy.ndarray` containing a stack of the 2D PSFs (the data
shape should be (N_psf, PSF_ny, PSF_nx)). The meta
attribute must be `dict` containing the following:
* ``'grid_xypos'``: A list of the (x, y) grid positions of
each reference PSF. The order of positions should match
the first axis of the 3D `~numpy.ndarray` of PSFs. In
other words, ``grid_xypos[i]`` should be the (x, y)
position of the reference PSF defined in ``data[i]``.
* ``'oversampling'``: The integer oversampling factor of the
PSF.
The meta attribute may contain other properties such as the
telescope, instrument, detector, and filter of the PSF.
"""
flux = Parameter(description='Intensity scaling factor for the PSF '
'model.', default=1.0)
x_0 = Parameter(description='x position in the output coordinate grid '
'where the model is evaluated.', default=0.0)
y_0 = Parameter(description='y position in the output coordinate grid '
'where the model is evaluated.', default=0.0)
def __init__(self, data, flux=flux.default, x_0=x_0.default,
y_0=y_0.default, fill_value=0.0):
if not isinstance(data, NDData):
raise TypeError('data must be an NDData instance.')
if data.data.ndim != 3:
raise ValueError('The NDData data attribute must be a 3D numpy '
'ndarray')
if 'grid_xypos' not in data.meta:
raise ValueError('"grid_xypos" must be in the nddata meta '
'dictionary.')
if len(data.meta['grid_xypos']) != data.data.shape[0]:
raise ValueError('The length of grid_xypos must match the number '
'of input PSFs.')
if 'oversampling' not in data.meta:
raise ValueError('"oversampling" must be in the nddata meta '
'dictionary.')
if not np.isscalar(data.meta['oversampling']):
raise ValueError('oversampling must be a scalar value')
        self.data = np.array(data.data, copy=True, dtype=float)
self.meta = data.meta
self.grid_xypos = data.meta['grid_xypos']
self.oversampling = data.meta['oversampling']
self._grid_xpos, self._grid_ypos = np.transpose(self.grid_xypos)
self._xgrid = np.unique(self._grid_xpos) # also sorts values
self._ygrid = np.unique(self._grid_ypos) # also sorts values
if (len(list(itertools.product(self._xgrid, self._ygrid))) !=
len(self.grid_xypos)):
raise ValueError('"grid_xypos" must form a regular grid.')
self._xgrid_min = self._xgrid[0]
self._xgrid_max = self._xgrid[-1]
self._ygrid_min = self._ygrid[0]
self._ygrid_max = self._ygrid[-1]
super().__init__(flux, x_0, y_0)
@staticmethod
def _find_bounds_1d(data, x):
"""
Find the index of the lower bound where ``x`` should be inserted
        into ``data`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
"""
idx = np.searchsorted(data, x)
if idx == 0:
idx0 = 0
elif idx == len(data): # pragma: no cover
idx0 = idx - 2
else:
idx0 = idx - 1
return idx0
def _find_bounding_points(self, x, y):
"""
Find the indices of the grid points that bound the input
``(x, y)`` position.
Parameters
----------
x, y : float
The ``(x, y)`` position where the PSF is to be evaluated.
Returns
-------
indices : list of int
A list of indices of the bounding grid points.
"""
if not np.isscalar(x) or not np.isscalar(y): # pragma: no cover
raise TypeError('x and y must be scalars')
if (x < self._xgrid_min or x > self._xgrid_max or
y < self._ygrid_min or y > self._ygrid_max): # pragma: no cover
raise ValueError('(x, y) position is outside of the region '
'defined by grid of PSF positions')
x0 = self._find_bounds_1d(self._xgrid, x)
y0 = self._find_bounds_1d(self._ygrid, y)
points = list(itertools.product(self._xgrid[x0:x0 + 2],
self._ygrid[y0:y0 + 2]))
indices = []
for xx, yy in points:
indices.append(np.argsort(np.hypot(self._grid_xpos - xx,
self._grid_ypos - yy))[0])
return indices
@staticmethod
def _bilinear_interp(xyref, zref, xi, yi):
"""
Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
        zref : 3D `~numpy.ndarray`
            A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
            axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
            the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array.
"""
if len(xyref) != 4:
raise ValueError('xyref must contain only 4 (x, y) pairs')
if zref.shape[0] != 4:
raise ValueError('zref must have a length of 4 on the first '
'axis.')
xyref = [tuple(i) for i in xyref]
idx = sorted(range(len(xyref)), key=xyref.__getitem__)
xyref = sorted(xyref) # sort by x, then y
(x0, y0), (_x0, y1), (x1, _y0), (_x1, _y1) = xyref
if x0 != _x0 or x1 != _x1 or y0 != _y0 or y1 != _y1:
            raise ValueError('The xyref points do not form a rectangle.')
if not np.isscalar(xi):
xi = xi[0]
if not np.isscalar(yi):
yi = yi[0]
if not x0 <= xi <= x1 or not y0 <= yi <= y1:
raise ValueError('The (x, y) input is not within the rectangle '
'defined by xyref.')
data = np.asarray(zref)[idx]
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
(xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
return np.sum(data * weights[:, None, None], axis=0) / norm
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0)
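def _gridded_psf_model_example():
    # Hedged usage sketch (not part of the original module): build a 2x2 grid
    # of identical synthetic PSFs and evaluate the interpolated model between
    # the grid points. The grid positions, oversampling factor and PSF array
    # below are made-up assumptions used only for illustration.
    yy, xx = np.mgrid[0:25, 0:25]
    psf = np.exp(-((xx - 12.0) ** 2 + (yy - 12.0) ** 2) / (2 * 3.0 ** 2))
    psfs = np.repeat(psf[np.newaxis, :, :], 4, axis=0)
    meta = {'grid_xypos': [(0, 0), (0, 50), (50, 0), (50, 50)],
            'oversampling': 4}
    model = GriddedPSFModel(NDData(psfs, meta=meta))
    model.x_0 = 25.0
    model.y_0 = 25.0
    # Evaluate on a small pixel grid around the requested position.
    ye, xe = np.mgrid[22:29, 22:29].astype(float)
    return model(xe, ye)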
class IntegratedGaussianPRF(Fittable2DModel):
r"""
Circular Gaussian model integrated over pixels. Because it is
integrated, this model is considered a PRF, *not* a PSF (see
:ref:`psf-terminology` for more about the terminology used here.)
This model is a Gaussian *integrated* over an area of ``1`` (in
units of the model input coordinates, e.g. 1 pixel). This is in
contrast to the apparently similar
`astropy.modeling.functional_models.Gaussian2D`, which is the value
of a 2D Gaussian *at* the input coordinates, with no integration.
So this model is equivalent to assuming the PSF is Gaussian at a
*sub-pixel* level.
Parameters
----------
sigma : float
Width of the Gaussian PSF.
flux : float (default 1)
Total integrated flux over the entire PSF
x_0 : float (default 0)
Position of the peak in x direction.
y_0 : float (default 0)
Position of the peak in y direction.
Notes
-----
This model is evaluated according to the following formula:
.. math::
f(x, y) =
\frac{F}{4}
\left[
{\rm erf} \left(\frac{x - x_0 + 0.5}
{\sqrt{2} \sigma} \right) -
{\rm erf} \left(\frac{x - x_0 - 0.5}
{\sqrt{2} \sigma} \right)
\right]
\left[
{\rm erf} \left(\frac{y - y_0 + 0.5}
{\sqrt{2} \sigma} \right) -
{\rm erf} \left(\frac{y - y_0 - 0.5}
{\sqrt{2} \sigma} \right)
\right]
where ``erf`` denotes the error function and ``F`` the total
integrated flux.
"""
flux = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
sigma = Parameter(default=1, fixed=True)
_erf = None
fit_deriv = None
@property
def bounding_box(self):
halfwidth = 4 * self.sigma
return ((int(self.y_0 - halfwidth), int(self.y_0 + halfwidth)),
(int(self.x_0 - halfwidth), int(self.x_0 + halfwidth)))
def __init__(self, sigma=sigma.default,
x_0=x_0.default, y_0=y_0.default, flux=flux.default,
**kwargs):
if self._erf is None:
from scipy.special import erf
self.__class__._erf = erf
super().__init__(n_models=1, sigma=sigma, x_0=x_0, y_0=y_0, flux=flux,
**kwargs)
def evaluate(self, x, y, flux, x_0, y_0, sigma):
"""Model function Gaussian PSF model."""
return (flux / 4 *
((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) *
(self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma)))))
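def _integrated_gaussian_prf_example():
    # Hedged usage sketch (not part of the original module): because each value
    # of IntegratedGaussianPRF is the flux integrated over one unit pixel, the
    # values summed over a grid covering the full support approximate the total
    # flux. The parameters below are arbitrary illustration values.
    prf = IntegratedGaussianPRF(sigma=1.5, flux=10.0, x_0=0.0, y_0=0.0)
    yy, xx = np.mgrid[-10:11, -10:11].astype(float)
    values = prf(xx, yy)
    # np.sum(values) is close to 10.0 here.
    return values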
class PRFAdapter(Fittable2DModel):
"""
A model that adapts a supplied PSF model to act as a PRF. It
integrates the PSF model over pixel "boxes". A critical built-in
assumption is that the PSF model scale and location parameters are
in *pixel* units.
Parameters
----------
psfmodel : a 2D model
The model to assume as representative of the PSF
renormalize_psf : bool
If True, the model will be integrated from -inf to inf and
re-scaled so that the total integrates to 1. Note that this
renormalization only occurs *once*, so if the total flux of
``psfmodel`` depends on position, this will *not* be correct.
xname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
x-axis center of the PSF. If None, the model will be assumed to
be centered at x=0.
yname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
y-axis center of the PSF. If None, the model will be assumed to
be centered at y=0.
fluxname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
total flux of the star. If None, a scaling factor will be
applied by the ``PRFAdapter`` instead of modifying the
``psfmodel``.
Notes
-----
    The current implementation of this class (using numerical
integration for each pixel) is extremely slow, and only suited for
experimentation over relatively few small regions.
"""
flux = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
def __init__(self, psfmodel, renormalize_psf=True, flux=flux.default,
x_0=x_0.default, y_0=y_0.default, xname=None, yname=None,
fluxname=None, **kwargs):
self.psfmodel = psfmodel.copy()
if renormalize_psf:
from scipy.integrate import dblquad
self._psf_scale_factor = 1. / dblquad(self.psfmodel,
-np.inf, np.inf,
lambda x: -np.inf,
lambda x: np.inf)[0]
else:
self._psf_scale_factor = 1
self.xname = xname
self.yname = yname
self.fluxname = fluxname
# these can be used to adjust the integration behavior. Might be
# used in the future to expose how the integration happens
self._dblquadkwargs = {}
super().__init__(n_models=1, x_0=x_0, y_0=y_0, flux=flux, **kwargs)
def evaluate(self, x, y, flux, x_0, y_0):
"""The evaluation function for PRFAdapter."""
if self.xname is None:
dx = x - x_0
else:
dx = x
setattr(self.psfmodel, self.xname, x_0)
        if self.yname is None:
dy = y - y_0
else:
dy = y
setattr(self.psfmodel, self.yname, y_0)
if self.fluxname is None:
return (flux * self._psf_scale_factor *
self._integrated_psfmodel(dx, dy))
else:
            setattr(self.psfmodel, self.fluxname, flux * self._psf_scale_factor)
return self._integrated_psfmodel(dx, dy)
def _integrated_psfmodel(self, dx, dy):
from scipy.integrate import dblquad
# infer type/shape from the PSF model. Seems wasteful, but the
        # integration step is a *lot* more expensive, so it's just peanuts
out = np.empty_like(self.psfmodel(dx, dy))
outravel = out.ravel()
for i, (xi, yi) in enumerate(zip(dx.ravel(), dy.ravel())):
outravel[i] = dblquad(self.psfmodel,
xi-0.5, xi+0.5,
lambda x: yi-0.5, lambda x: yi+0.5,
**self._dblquadkwargs)[0]
return out
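def _prf_adapter_example():
    # Hedged usage sketch (not part of the original module): wrap an astropy
    # Gaussian2D PSF model so that it is integrated over pixel boxes. The
    # parameter names passed as ``xname``/``yname`` follow Gaussian2D; the
    # numerical values are made-up illustration values. This is slow because
    # every pixel requires a numerical double integration.
    from astropy.modeling.functional_models import Gaussian2D
    psf = Gaussian2D(amplitude=1.0, x_mean=0.0, y_mean=0.0,
                     x_stddev=2.0, y_stddev=2.0)
    prf = PRFAdapter(psf, renormalize_psf=True,
                     xname='x_mean', yname='y_mean', fluxname=None)
    yy, xx = np.mgrid[-3:4, -3:4].astype(float)
    return prf(xx, yy)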
|
the-stack_0_23353 | import unittest
from xml.etree import ElementTree
from fixtodict.fix_version import FixVersion
class TestFixVersionFromString(unittest.TestCase):
def test_40(self):
version = FixVersion("fix.4.0")
self.assertEqual(
version.data,
{"fix": "fix", "major": "4", "minor": "0", "sp": "0"},
)
def test_50SP2EP254(self):
version = FixVersion("fix.5.0SP2", ep="254")
self.assertEqual(
version.data,
{"fix": "fix", "major": "5", "minor": "0", "sp": "2", "ep": "254"},
)
def test_fixt_11(self):
version = FixVersion("fixt.1.1")
self.assertEqual(
version.data,
{"fix": "fixt", "major": "1", "minor": "1", "sp": "0"},
)
def xml_string_to_version(data, prefix):
return FixVersion.create_from_xml_attrs(
ElementTree.fromstring(data).attrib, prefix
).data
class TestFixVersionFromXML(unittest.TestCase):
def test_updated_50SP1EP97(self):
data = """
<Message
updated="FIX.5.0SP1"
updatedEP="97"
added="FIX.4.4"
addedEP="-1">
</Message>
"""
self.assertEqual(
xml_string_to_version(data, "updated"),
{"fix": "fix", "major": "5", "minor": "0", "sp": "1", "ep": "97"},
)
def test_added_44(self):
data = """
<Component
updated="FIX.5.0SP1"
updatedEP="97"
added="FIX.4.4"
addedEP="-1">
</Component>
"""
self.assertEqual(
xml_string_to_version(data, "added"),
{"fix": "fix", "major": "4", "minor": "4", "sp": "0"},
)
|
the-stack_0_23354 | """
This file offers the methods to automatically retrieve the graph Ruminococcaceae bacterium P7.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def RuminococcaceaeBacteriumP7(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Ruminococcaceae bacterium P7 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Ruminococcaceae bacterium P7 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="RuminococcaceaeBacteriumP7",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
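if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module, and illustrative
    # only, since this file is normally imported as part of its package):
    # retrieving the graph requires network access and the ensmallen backend.
    graph = RuminococcaceaeBacteriumP7(directed=False, verbose=2)
    print(graph)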
|
the-stack_0_23355 | # -*- coding: utf-8 -*-
"""
Created on Fri May 21 09:07:18 2021
@author: fdm
"""
import cv2
import numpy as np
import glob
import ntpath
import os
import re
from PIL import Image
import time
from tool.predictor import Predictor
from tool.config import Cfg
config_all = Cfg.load_config_from_file('./train_config/seq2seq_2906_pretrain_32_10k.yml')
config_all['weights'] = './checkpoint/seq2seq_2906_pretrain_32_10k.pth'
config_all['cnn']['pretrained'] = False
config_all['device'] = 'cuda:1'
# config_all['device'] = 'cuda:1'
config_all['predictor']['beamsearch'] = False
# config_all['vocab'] = '''aAàÀảẢãÃáÁạẠăĂằẰẳẲẵẴắẮặẶâÂầẦẩẨẫẪấẤậẬbBcCdDđĐeEèÈẻẺẽẼéÉẹẸêÊềỀểỂễỄếẾệỆfFgGhHiIìÌỉỈĩĨíÍịỊjJkKlLmMnNoOòÒỏỎõÕóÓọỌôÔồỒổỔỗỖốỐộỘơƠờỜởỞỡỠớỚợỢpPqQrRsStTuUùÙủỦũŨúÚụỤưƯừỪửỬữỮứỨựỰvVwWxXyYỳỲỷỶỹỸýÝỵỴzZ0125456789!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ '''
detector_old = Predictor(config_all)
def no_accent_vietnamese(s):
s = re.sub(r'[àáạảãâầấậẩẫăằắặẳẵ]', 'a', s)
s = re.sub(r'[ÀÁẠẢÃĂẰẮẶẲẴÂẦẤẬẨẪ]', 'A', s)
s = re.sub(r'[èéẹẻẽêềếệểễ]', 'e', s)
s = re.sub(r'[ÈÉẸẺẼÊỀẾỆỂỄ]', 'E', s)
s = re.sub(r'[òóọỏõôồốộổỗơờớợởỡ]', 'o', s)
s = re.sub(r'[ÒÓỌỎÕÔỒỐỘỔỖƠỜỚỢỞỠ]', 'O', s)
s = re.sub(r'[ìíịỉĩ]', 'i', s)
s = re.sub(r'[ÌÍỊỈĨ]', 'I', s)
s = re.sub(r'[ùúụủũưừứựửữ]', 'u', s)
s = re.sub(r'[ƯỪỨỰỬỮÙÚỤỦŨ]', 'U', s)
s = re.sub(r'[ỳýỵỷỹ]', 'y', s)
s = re.sub(r'[ỲÝỴỶỸ]', 'Y', s)
s = re.sub(r'[Đ]', 'D', s)
s = re.sub(r'[đ]', 'd', s)
return s
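# Hedged sanity check (not part of the original script): stripping diacritics
# should, for example, map "Phát triển" to "Phat trien".
assert no_accent_vietnamese("Phát triển") == "Phat trien"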
def util_check_input_img(img_input):
if not isinstance(img_input, (np.ndarray)):
#print('not np array')
return False
if img_input.shape[0] < 10 or img_input.shape[1] < 10:
#print('small image %d %d' %(img_input.shape[0], img_input.shape[1]))
return False
return True
def run_ocr_cnn(img_line):
if not util_check_input_img(img_line):
return ""
is_success, buffer = cv2.imencode('.jpg', img_line)
# print(buffer.tobytes())
str_ocr_val = detector_old.predict_bytes(buffer)
return str_ocr_val
def Repalce(s):
    # Strip punctuation/separator characters and collapse double spaces before
    # comparing OCR output against the ground-truth text.
    for symbol in ['/', '_', ':', ';', '-', '.', ',', '?']:
        s = s.replace(symbol, '')
    s = s.replace('  ', ' ')
    return s.strip()
def check_cnn_model(str_img_path, txt_file):
### read and run ocr by cnn model
img_in = cv2.imread(str_img_path)
str_ocr = run_ocr_cnn(img_in)
kq_test = ''
if len(str_ocr)==0 or len(txt_file)==0:
return [0, 0, 0, "false"]
if Repalce(str_ocr).strip() == Repalce(txt_file).strip():
a = 1
else:
a = 0
kq_test = f"\n{str_ocr}\n{txt_file}\n-------------------------"
    # Accent-insensitive comparison
    upperocr = no_accent_vietnamese(Repalce(str_ocr).strip())
    uppertxt = no_accent_vietnamese(Repalce(txt_file).strip())
    if upperocr == uppertxt:
        b = 1
    else:
        b = 0
    # Accent- and case-insensitive comparison
    if upperocr.upper() == uppertxt.upper():
        c = 1
    else:
        c = 0
    return [a, b, c, kq_test]
# print(a)
### TO DO: read txt_file
### str_true = read from text file
### compare in 2 level:
### 1. compare str_ocr and str_true
### 2. No accent vnese
## str_non_vnese_ocr = no_accent_vietnamese (str_ocr)
## str_non_vnese_true = no_accent_vietnamese (str_true)
## compare str_non_vnese_ocr and str_non_vnese_true
# check_cnn_model('/home/longhn/Desktop/Anotation/data/0HD_QuanLyTaiKhoan_COLOMBO.pdf_182021042210545568.jpg','CÔNG TY TNHH ĐẦU TƯ VÀ PHÁT TRIỂN COLOMBO')
afile = open('/home/longhn/Annotation_2906/test.txt')
kq = open('/home/longhn/Annotation_2906/kq.txt', 'w', encoding="utf8")
true_11 = 0
true_noacc = 0
true_upper = 0
start = time.time()
for x in afile:
    data = x.split("\t")
    link = '/home/longhn/' + data[0]
    text = data[1].strip()
    # Run the OCR comparison once per image instead of repeating the expensive
    # model call for every metric.
    result = check_cnn_model(link, text)
    if result[3] != '':
        kq.write(f"{link} {result[3]}")
    true_11 += result[0]
    true_noacc += result[1]
    true_upper += result[2]
stop = time.time()
print("So sanh 1vs1:",true_11)
print("So sanh ko dau:", true_noacc)
print("So sanh upper:", true_upper)
print("Tong thoi gian:", stop-start)
|
the-stack_0_23357 | import GPy
import libs.utils.gpy_estimation_lib as gpy_estimation_lib
import libs.utils.initialisor as initialisor
import libs.transformations as transfo
import numpy as np
import scipy.stats
import math
from scipy.stats import norm
class CustomGPy():
eps = 0.001
is_zero_mean = False
__slots__ = ['lengthscale_constraint', 'variance_constraint',
'init', 'optim_opts', 'kernel_function', 'mean_function',
'model', 'status', '_input_values', '_output_values', 'loo_mean',
'loo_var', 'MGE', 'AGE', 'untrained_variance', 'untrained_lengthscale',
'fix_noise', 'fix_var', 'profiler', 'do_profiling', 'postvar_options',
'_input_ordinates', '_input_factors', '_output_ordinate', '_output_factor',
'input_transform_type', 'output_transform_type']
def __init__(self,
lengthscale_constraint_class=transfo.Exponent,
variance_constraint_class=transfo.Exponent,
init="classic", stopping_criterion="strict",
optim_scheme=[[10, 3.0], [2, 1.0], [2, 1.0]],
untrained_variance=1, untrained_lengthscale=1,
fix_noise=True, fix_var=False,
profiler=gpy_estimation_lib.analytical_mean_and_variance_optimization,
postvar_options={"value": 0, "type": 'Error'},
input_transform_type='Hypercube',
output_transform_type='Standardize',
do_profiling=True
):
self.input_transform_type = input_transform_type
self.output_transform_type = output_transform_type
self.untrained_variance = untrained_variance
self.untrained_lengthscale = untrained_lengthscale
self.set_parametrization(
lengthscale_constraint_class,
variance_constraint_class,
)
if init in ['brutal', 'classic', 'scaled_anisotropic_init',
'classic_profiled', 'custom']:
self.init = init
else:
raise ValueError('Unknown method : {}'.format(init))
assert not (fix_var and profiler is not None)
self.set_optim_opts(
stopping_criterion=stopping_criterion,
optim_scheme=optim_scheme,
do_profiling=do_profiling,
profiler=profiler
)
assert fix_noise, 'Not implemented yet'
self.fix_noise = fix_noise
self.fix_var = fix_var
self.postvar_options = postvar_options
self.kernel_function = None
self.mean_function = None
self.model = None
self._input_values = None
self._output_values = None
self.loo_mean = None
self.loo_var = None
self.MGE = None
self.AGE = None
self.status = None
# --------------------------------------------------------------------------
def set_optim_opts(self, stopping_criterion, optim_scheme, do_profiling, profiler):
if stopping_criterion == 'strict':
gtol = 10 ** (-20)
bfgs_factor = 10
elif stopping_criterion == 'soft':
gtol = None
bfgs_factor = None
elif stopping_criterion == 'intermediate':
gtol = 10 ** (-14)
bfgs_factor = 10
else:
raise ValueError("Unknown stopping criterion setting : {}.".format(stopping_criterion))
self.profiler = profiler
self.do_profiling = do_profiling
self.optim_opts = {
'optim_scheme': optim_scheme,
'gtol': gtol,
'bfgs_factor': bfgs_factor
}
# --------------------------------------------------------------------------
def set_parametrization(
self,
lengthscale_constraint_class,
variance_constraint_class,
):
self.lengthscale_constraint = lengthscale_constraint_class()
assert (self.lengthscale_constraint.domain == transfo.domains._POSITIVE)
self.variance_constraint = variance_constraint_class()
assert (self.variance_constraint.domain == transfo.domains._POSITIVE)
# --------------------------------------------------------------------------
def set_kernel(self):
# Create a kernel function object with default parametrizations
self.kernel_function\
= GPy.kern.Matern52(input_dim=self._input_values.shape[1],
variance=self.untrained_variance,
lengthscale=self.untrained_lengthscale, ARD=True)
# Set parametrization and/or constraints for the range parameters
self.kernel_function\
.lengthscale.constrain(transform=self.lengthscale_constraint)
# Set parametrization and/or constraints for the variance parameter
self.kernel_function\
.variance.constrain(transform=self.variance_constraint)
if self.fix_var:
self.kernel_function.variance.fix()
def transform_input(self, x):
tiled_ordinates = np.tile(self._input_ordinates, reps=[x.shape[0], 1])
tiled_factors = np.tile(self._input_factors, reps=[x.shape[0], 1])
assert x.shape == tiled_ordinates.shape and x.shape == tiled_factors.shape
return (x - tiled_ordinates)/tiled_factors
def scale_input_back(self, x):
assert x.shape == self._input_factors.shape
return x * self._input_factors
def transform_output(self, y):
assert isinstance(self._output_ordinate, float) and isinstance(self._output_factor, float)
return (y - self._output_ordinate)/self._output_factor
def transform_post_mean(self, y):
assert isinstance(self._output_ordinate, float) and isinstance(self._output_factor, float)
return y*self._output_factor + self._output_ordinate
def transform_post_var(self, var):
assert isinstance(self._output_factor, float)
return (self._output_factor**2) * var
# ---------------------------------------------------------------------------
def set_data(self, input_data, output_data):
assert isinstance(input_data, np.ndarray) and isinstance(output_data, np.ndarray)
assert input_data.ndim == 2 and output_data.ndim == 2
assert input_data.shape[0] == output_data.shape[0] and output_data.shape[1] == 1
if self._input_values is not None:
if input_data.shape[1] != self._input_values.shape[1]:
print('Warning : X dimensionality differs from original data. Cleaning the model.')
self.clean()
if self._input_values is None:
self.store_normalizers(input_data, output_data)
self._input_values = self.transform_input(input_data)
self._output_values = self.transform_output(output_data)
self.check_training_type()
if self.model is None:
self.re_build_model()
else:
self.repair_model()
# ---------------------------------------------------------------------------
def store_normalizers(self, input_data, output_data):
if self.input_transform_type == 'Hypercube':
self._input_ordinates = input_data.min(0)
self._input_factors = input_data.max(0) - input_data.min(0)
elif self.input_transform_type == 'None':
self._input_ordinates = np.zeros(input_data.shape[1])
self._input_factors = np.ones(input_data.shape[1])
elif self.input_transform_type == 'Standardize':
self._input_ordinates = input_data.mean(0)
self._input_factors = input_data.std(0)
else:
raise ValueError(self.input_transform_type)
if self.output_transform_type == 'Standardize':
self._output_ordinate = output_data.mean()
self._output_factor = output_data.std()
elif self.output_transform_type == 'None':
self._output_ordinate = 0.0
self._output_factor = 1.0
else:
raise ValueError(self.output_transform_type)
# ---------------------------------------------------------------------------
def re_build_model(self):
self.check_training_type()
self.mean_function = GPy.mappings.constant.Constant(input_dim=self._input_values.shape[1], output_dim=1, value=0.0)
self.set_kernel()
self.repair_model()
self.loo_mean = None
self.loo_var = None
self.MGE = None
self.AGE = None
self.status = "Untrained"
# ---------------------------------------------------------------------------
def repair_model(self):
self.check_training_type()
self.model = GPy.models.GPRegression(self._input_values, self._output_values, kernel=self.kernel_function,
Y_metadata=None, normalizer=None,
noise_var=0, mean_function=self.mean_function)
if self.fix_noise:
self.model.Gaussian_noise.variance.fix()
self.loo_mean = None
self.loo_var = None
self.MGE = None
self.AGE = None
self.status = "Untrained"
# ---------------------------------------------------------------------------
def initialize(self):
self.check_training_type()
init_opts = {'fix_var': self.fix_var, 'profiler': self.profiler, 'is_zero_mean': self.is_zero_mean}
if self.init == 'scaled_anisotropic_init':
self.model = initialisor.grid_init(self.model, isotropic=False, **init_opts)
elif self.init == 'classic':
self.model = initialisor.std_init(
self.model,
use_dimension=True,
profiler=None,
fix_var=self.fix_var,
is_zero_mean=self.is_zero_mean
)
elif self.init == 'classic_profiled':
self.model = initialisor.std_init(self.model, use_dimension=True, **init_opts)
elif self.init in ['custom', 'brutal']:
pass
else:
raise NotImplementedError('{} init method'.format(self.init))
# ---------------------------------------------------------------------------
def train(self):
if self.init == 'brutal':
self.model, self.status = gpy_estimation_lib.brutal_train(
self.model,
n=self.optim_opts['optim_scheme'][0][0],
profiler=self.profiler
)
else:
self.initialize()
self.check_training_type()
assert not (self.fix_var and self.profiler is not None)
if self.do_profiling:
trainer_profiler = self.profiler
else:
trainer_profiler = None
self.model, self.status = gpy_estimation_lib.trainer(
self.model,
options=self.optim_opts,
profiler=trainer_profiler
)
self.loo_mean = None
self.loo_var = None
self.MGE = None
self.AGE = None
# ---------------------------------------------------------------------------
def predict(self, data):
self.check_testing_type(data)
y_pred, var = self.model.predict(self.transform_input(data))
if np.any(var < self.postvar_options['value']):
if self.postvar_options['type'] == 'Error':
raise ValueError("Variance below threshold : {}".format(self.postvar_options['value']))
elif self.postvar_options['type'] == 'truncate':
var[var < self.postvar_options['value']] = self.postvar_options['value']
elif self.postvar_options['type'] == 'None':
pass
else:
raise ValueError(self.postvar_options['type'])
return self.transform_post_mean(y_pred), self.transform_post_var(var)
# ---------------------------------------------------------------------------
def get_cdf(self, x, data):
y_pred, y_var = self.predict(data)
assert y_pred.shape == y_var.shape, "Shape issue"
assert isinstance(x, float), "x must be float"
y_sd = np.vectorize(math.sqrt)(y_var)
return scipy.stats.norm.cdf((x - y_pred)/y_sd)
# ---------------------------------------------------------------------------
def get_y_quantiles(self, q, data):
y_pred, y_var = self.predict(data)
assert y_pred.shape == y_var.shape, "Shape issue"
    assert isinstance(q, float), "q must be float"
return norm.ppf(q, loc=y_pred, scale=np.vectorize(math.sqrt)(y_var))
# ---------------------------------------------------------------------------
def get_gaussian_normalized(self, data, truth):
y_pred, y_var = self.predict(data)
assert truth.shape == y_var.shape and truth.shape == y_pred.shape, "Shape issue"
return (truth - y_pred) / np.vectorize(math.sqrt)(y_var)
# ---------------------------------------------------------------------------
def sample_y(self, data, n_samples=1):
self.check_testing_type(data)
y_pred = self.model.posterior_samples(X=self.transform_input(data), size=n_samples)
return self.transform_post_mean(y_pred)
# ---------------------------------------------------------------------------
def get_loo_mean(self):
if self.loo_mean is None:
self.set_loo_posterior_metrics()
return self.loo_mean
# ---------------------------------------------------------------------------
def get_loo_var(self):
if self.loo_var is None:
self.set_loo_posterior_metrics()
return self.loo_var
# ---------------------------------------------------------------------------
def get_age(self):
if self.AGE is None:
self.set_loo_posterior_metrics()
return self.AGE
# ---------------------------------------------------------------------------
def get_mge(self):
if self.MGE is None:
self.set_loo_posterior_metrics()
return self.MGE
# ---------------------------------------------------------------------------
def set_loo_posterior_metrics(self):
g = self.model.posterior.woodbury_vector
c = self.model.posterior.woodbury_inv
y = self.model.Y_normalized
c_diag = np.diag(c)[:, None]
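        # Closed-form leave-one-out predictions: with g = K^{-1}(y - m) and
        # c = K^{-1} (the Woodbury quantities above), the LOO mean and variance
        # at point i are mu_i = y_i - g_i / c_ii and var_i = 1 / c_ii.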
assert isinstance(g, np.ndarray) and isinstance(c_diag, np.ndarray) \
and isinstance(y, np.ndarray), 'Type issue'
assert g.shape == c_diag.shape and y.shape == g.shape, "Shape issue"
mu = y - g / c_diag
var = 1 / c_diag
self.loo_mean = mu
self.loo_var = var
assert self._output_values.shape == self.loo_mean.shape, "Shape issue"
self.AGE = 100 * np.mean(
abs(self.loo_mean - self._output_values) / (self._output_values.max() - self._output_values.min()))
self.MGE = 100 * np.max(
abs(self.loo_mean - self._output_values) / (self._output_values.max() - self._output_values.min()))
# ---------------------------------------------------------------------------
def check_training_type(self):
if not (isinstance(self._input_values, np.ndarray) and isinstance(self._output_values, np.ndarray)):
raise TypeError("Input and output values should be numpy arrays. They are respectively {}, {}".format(
type(self._input_values), type(self._output_values)))
def check_testing_type(self, data):
if not isinstance(data, np.ndarray):
raise TypeError("Input and output values should be numpy arrays, not {}".format(type(data)))
assert data.shape[1] == self._input_values.shape[1]
# ---------------------------------------------------------------------------
def clean(self):
self.model = None
self.kernel_function = None
self.mean_function = None
self._input_values = None
self._output_values = None
self.loo_mean = None
self.loo_var = None
self.MGE = None
self.AGE = None
self.status = "Untrained"
self.clean_normalization()
# ---------------------------------------------------------------------------
def clean_normalization(self):
self._input_ordinates = None
self._input_factors = None
self._output_ordinate = None
self._output_factor = None
# ---------------------------------------------------------------------------
def __str__(self):
if self.model is not None:
return(self.model.__str__() + "\n" + self.model.kern.lengthscale.__str__())
else:
return("Model unset for now.")
# ---------------------------------------------------------------------------
def get_c_mean(self):
if self.model is None:
raise ValueError("Model is None, data probably hasnt been defined.")
else:
return self.transform_post_mean(self.model.constmap.C.values.copy())
# ---------------------------------------------------------------------------
def get_c_var(self):
if self.model is None:
raise ValueError("Model is None, data probably hasnt been defined.")
else:
return self.transform_post_var(self.model.kern.variance.values.copy())
# ---------------------------------------------------------------------------
def get_ls(self):
if self.model is None:
raise ValueError("Model is None, data probably hasnt been defined.")
else:
std_ls = self.model.kern.lengthscale.values.copy()
return self.scale_input_back(std_ls)
# # ---------------------------------------------------------------------------
# def set_ls(self, value):
# if self.model is None:
# raise ValueError("Model is None, data probably hasnt been defined.")
# else:
# gpy_estimation_lib.set_gpy_model_ls(self.model, value)
# ---------------------------------------------------------------------------
def get_status(self):
if self.status is None:
raise ValueError("Status is None, data probably hasnt been defined.")
else:
return self.status
# ---------------------------------------------------------------------------
def get_objective_value(self):
if self.model is None:
raise ValueError("Model is None, data probably hasnt been defined.")
else:
return self.model.objective_function() + 0.5*self._output_values.shape[0]*math.log(self._output_factor**2)
# ---------------------------------------------------------------------------
def get_rkhs_semi_norm(self):
raise NotImplementedError("Be carefull, m may not be the IK's one")
# ---------------------------------------------------------------------------
def plot_warping(self):
pass
# ---------------------------------------------------------------------------
def to_dict(self):
raise NotImplementedError
# model_dict = {}
# for k in ['param', 'init', 'gtol', 'bfgs_factor', 'do_restarts', 'restart_sessions_limit', 'num_restarts',
# 'analytical_mu_and_sigma2_optimization', 'end_analytical_mu_and_sigma2_optimization',
# 'status']:
# model_dict[k] = self.__getattribute__(k)
#
# model_dict['model'] = self.model.to_dict()
#
# return model_dict
# ---------------------------------------------------------------------------
def set_model_from_dict(self, d):
raise NotImplementedError
# for k in ['param', 'init', 'gtol', 'bfgs_factor', 'do_restarts', 'restart_sessions_limit', 'num_restarts',
# 'analytical_mu_and_sigma2_optimization', 'end_analytical_mu_and_sigma2_optimization',
# 'status']:
# self.__setattr__(k, d[k])
#
# self.model = GPy.models.GPRegression.from_dict(d['model'])
#
# self.kernel_function = self.model.kern
# self.mean_function = self.model.mean_function
# self._input_values = self.model.X
# self._output_values = self.model.Y
# self.set_loo_posterior_metrics()
# ---------------------------------------------------------------------------
@staticmethod
def default_args():
return {'init': "classic",
'stopping_criterion': "strict",
'do_profiling': True,
'do_restarts': True,
'do_end_profiling': True,
'n_multistarts': 2,
'n_iterations': 3}
# ---------------------------------------------------------------------------
@staticmethod
def default_setup():
return CustomGPy(**CustomGPy.default_args())
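if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): fit the wrapper on
    # a small synthetic 1-D data set. Assumes GPy and the libs.* helpers
    # imported at the top of this file are available in the environment.
    rng = np.random.RandomState(0)
    x_train = rng.uniform(0.0, 1.0, size=(30, 1))
    y_train = np.sin(6.0 * x_train) + 0.05 * rng.randn(30, 1)
    gp = CustomGPy()
    gp.set_data(x_train, y_train)
    gp.train()
    x_test = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
    post_mean, post_var = gp.predict(x_test)
    print(gp)
    print("LOO mean relative error (%):", gp.get_age())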
|
the-stack_0_23358 | import tempfile
from os import path
import pytest
from dateutil import parser
from jinja2.loaders import PackageLoader
from jtex.TemplateRenderer import TemplateRenderer
def test_not_initialized_on_construction():
renderer = TemplateRenderer()
assert renderer.jinja is None
@pytest.fixture(name="renderer")
def _renderer():
renderer = TemplateRenderer()
renderer.use_loader(PackageLoader("jtex", "builtin_template"))
return renderer
def test_has_environment(renderer):
assert renderer.jinja is not None
def test_syntax_variable(renderer):
T = "lorem ipsum [-TITLE-] lorem ipsum"
output = renderer.render_from_string(T, dict(TITLE="ABC"))
assert output == "lorem ipsum ABC lorem ipsum"
def test_syntax_block(renderer):
T = r"[# for _ in items #]ABC[# endfor #]"
output = renderer.render_from_string(T, dict(x="y"))
assert output == ""
output = renderer.render_from_string(T, dict(x="y", items=[0, 1, 2]))
assert output == "ABCABCABC"
def test_syntax_block_var(renderer):
T = r"[# for i in items #]ABC[-i-][# endfor #]"
output = renderer.render_from_string(T, dict(x="y", items=[0, 1, 2]))
assert output == "ABC0ABC1ABC2"
def test_syntax_inline_comment(renderer):
T = r"just %% not this"
output = renderer.render_from_string(T, dict(x="y"))
assert output == "just"
def test_syntax_comment(renderer):
T = r"just %# not this #% this"
output = renderer.render_from_string(T, dict(x="y"))
assert output == "just this"
def test_syntax_zip(renderer):
T = r"[# for a, b in zip(A,B)#][-a-][-b-]|[# endfor #]"
output = renderer.render_from_string(T, dict(A=["x", "y", "z"], B=[1, 2, 3]))
assert output == "x1|y2|z3|"
def test_use_filesystem_template():
with tempfile.TemporaryDirectory() as tmp:
a = open(path.join(tmp, "a.tex"), "w")
a.close()
b = open(path.join(tmp, "b.tex"), "w")
b.close()
renderer = TemplateRenderer()
renderer.use_from_folder(tmp)
assert renderer.list_templates() == ["a.tex", "b.tex"]
def test_rendering(renderer):
"""
Rendering using the default package loader
"""
assert renderer.list_templates() == ["template.tex"]
data = dict(
doc=dict(
oxalink="https://curvenote.com",
title="A Paper",
authors=[dict(name=name) for name in ["Curve Note", "Io Oxa"]],
date=parser.parse("6/18/2021"),
),
tagged=dict(abstract="Lorem ispum"),
curvenote=dict(defs="\\input{curvenote.def}"),
)
output = renderer.render(data, content="Lorem ipsum blahdium...")
assert r"\newcommand{\logo}{" in output
assert r"Curve Note \and Io Oxa" in output
assert r"\title{A Paper}" in output
assert r"\newdate{articleDate}{18}{6}{2021}" in output
assert r"Lorem ipsum blahdium..." in output
|
the-stack_0_23360 | import logging
import tkinter
class TKHandler(logging.Handler):
def __init__(self, console=None):
logging.Handler.__init__(self)
self.console = console # must be a text widget of some kind.
def emit(self, message):
formattedMessage = self.format(message)
if self.console:
self.console.configure(state=tkinter.NORMAL)
self.console.insert(tkinter.END, formattedMessage + '\n')
self.console.configure(state=tkinter.DISABLED)
self.console.see(tkinter.END)
self.console.update()
print(formattedMessage)
_logger = logging.getLogger("ANRS Logger")
_handler = TKHandler()
_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
_handler.setFormatter(_formatter)
_logger.addHandler(_handler)
_handler.setLevel(logging.DEBUG)
_logger.setLevel(logging.DEBUG)
def logger():
return _logger
def setLoggerConsole(console):
_handler.console = console
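if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): route log records
    # into a Tkinter Text widget through the shared handler.
    root = tkinter.Tk()
    console = tkinter.Text(root, state=tkinter.DISABLED)
    console.pack()
    setLoggerConsole(console)
    logger().info("Logging into the Tk console widget")
    logger().warning("Warnings are shown here too")
    root.mainloop()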
|
the-stack_0_23361 | '''
This script exemplifies how to create classes and class objects.
For more details:
https://docs.python.org/3/tutorial/classes.html
'''
# Classes are very useful for creating, in a sense, a custom data type for which we can define rich functionality.
# The most typical example would be defining a student class:
class Student:
def __init__(self, name, surname, age, major, gpa):
self.name = name
self.surname = surname
self.age = age
self.major = major
self.gpa = gpa
# Within the class, we can define multiple functions (methods). These methods can be accessed through the objects
# that we create from this class.
def checkGPADesignation(self, gpa):
        if (gpa >= 3.5) and (gpa < 3.75):
            print('Cum Laude!')
        elif (gpa >= 3.75) and (gpa < 4.00):
            print('Magna Cum Laude!')
elif (gpa == 4.00):
print('Summa Cum Laude!')
else:
print('No designation!')
def getStudentName(self):
return self.name
def setStudentGPA(self,new_gpa):
self.gpa = new_gpa
# Creating a class object:
studentObj1 = Student(name = 'Adam', surname = 'Taylor', age = '20', major = 'ECE', gpa = 3.63)
# The same object can be created without using the variable names:
# studentObj1 = Student('Adam', 'Taylor', '20', 'ECE', 3.63)
'''
Note: If the class is defined in another Python file, we first need to import it into the script where we want to use it.
We would type:
from PythonFileName import Student
obj = Student(...)
'''
# We can use the methods defined in the class using the object we created:
studentName = studentObj1.getStudentName()
print('The name of the student 1 is: ', studentName, '\n')
# A student's attributes can also be accessed directly using objectName.attributeName:
studentGPA = studentObj1.gpa
print('The GPA of student 1 is: ', studentGPA, '\n')
# Let's check whether this GPA earns any honor designation:
studentObj1.checkGPADesignation(studentGPA)
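# A short extra example (the names and values here are made up for illustration): each object
# created from the class holds its own attribute values, so a second student is independent of the first.
studentObj2 = Student(name='Beth', surname='Jones', age='21', major='CS', gpa=3.95)
print('The name of the student 2 is: ', studentObj2.getStudentName(), '\n')
studentObj2.checkGPADesignation(studentObj2.gpa)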
# We can also change an object's attribute values by using setter methods defined within the class:
studentObj1.setStudentGPA(3.8)
# Let's check the honor designation again for the updated GPA:
studentObj1.checkGPADesignation(studentObj1.gpa) |
the-stack_0_23363 | import boto3
import json
import os
def handler(body, req):
    if body is None or body.get('key') is None or body.get('object') is None:
raise Exception('Invalid request body.')
session = boto3.Session(
aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY'],
)
bucket = os.environ['S3_BUCKET_NAME']
key = body['key']
s3_file = body['object']
s3 = session.resource('s3').Bucket(bucket)
s3.Object(key=key).put(Body=json.dumps(s3_file))
return 'File available at bucket %s with key %s' % (bucket, key)
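# Minimal local-invocation sketch (added for illustration). It assumes the AWS_ACCESS_KEY,
# AWS_SECRET_KEY and S3_BUCKET_NAME environment variables are set; the key and payload
# below are placeholder values, and the second (request) argument is unused by handler().
if __name__ == '__main__':
    example_body = {'key': 'example.json', 'object': {'hello': 'world'}}
    print(handler(example_body, None))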
|
the-stack_0_23366 | # 6713
# (<(!--.*|script)(.|\n[^<])*(--|script)>)|(<|<)(/?[\w!?]+)\s?[^<]*(>|>)|(\&[\w]+\;)
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:"<!----><1>"+"&"*5000+"!1 _SLQ_2"
import re
from time import perf_counter
regex = r"""(<(!--.*|script)(.|\n[^<])*(--|script)>)|(<|<)(/?[\w!?]+)\s?[^<]*(>|>)|(\&[\w]+\;)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "<!----><1>" + "&" * i * 10000 + "!1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") |
the-stack_0_23368 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
]
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True):
"""Given loss and parameters for optimizer, returns a training op.
Various ways of passing optimizers, include:
- string, name of the optimizer like 'SGD', 'Adam', see OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- function, takes learning rate `Tensor` as argument and must return
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
- class, subclass of `Optimizer` that takes only one required argument -
learning rate, such as AdamOptimizer, AdagradOptimizer.
E.g. `optimize_loss(..., optimizer=tf.train.AdagradOptimizer)`.
- object, instance of subclass of `Optimizer`.
      E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter to update on each step
unless `increment_global_step` is `False`. If not supplied,
it will be fetched from the default graph (see
`tf.train.get_global_step` for details). If it's
not been created, no step will be incremented with each weight
update. `learning_rate_decay_fn` requires `global_step`.
learning_rate: float or `Tensor`, magnitude of update per each training
step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of `tf.Optimizer` that implements
`compute_gradients` and `apply_gradients` functions.
optimizer instance should be instantiation of `tf.Optimizer`
sub-class and have `compute_gradients` and `apply_gradients`
functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
clip_gradients: float, callable or `None`. If float, is provided, a global
clipping is applied to prevent the norm of the gradient to exceed this
value. Alternatively, a callable can be provided e.g.: adaptive_clipping.
This callable takes a `list` of `(gradients, variables)` `tuple`s and
returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: `tf.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution
between `update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
summaries: List of internal quantities to visualize on tensorboard. If not
set only the loss and the learning rate will be reported. The
complete list is in OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
increment_global_step: Whether to increment `global_step`. If your model
calls `optimize_loss` multiple times per training step (e.g. to optimize
different parts of the model), use this arg to avoid incrementing
`global_step` more times than necessary.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` is wrong type.
* `clip_gradients` is not float or callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
* `gradients` is empty
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = contrib_framework.get_global_step()
else:
contrib_framework.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
# Make sure update ops are ran before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
          raise ValueError("Invalid learning_rate %s." % learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (str(learning_rate),
str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if not gradients:
raise ValueError(
"Empty list of (gradient, var) pairs encountered. This is most "
"likely to be caused by an improper value of gradient_multipliers.")
if "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError(
"Unknown type %s for clip_gradients" % type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and "gradient_norm" in summaries:
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients,
global_step=global_step if increment_global_step else None,
name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
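# Example usage (sketch added for illustration; the tensors and the helper used to obtain the
# global step are placeholders and depend on the surrounding graph / TF version):
#
#   loss = ...                                   # scalar loss tensor built elsewhere
#   global_step = ...                            # e.g. tf.train.get_or_create_global_step()
#   train_op = optimize_loss(loss,
#                            global_step,
#                            learning_rate=0.01,
#                            optimizer="Adam",
#                            clip_gradients=5.0,
#                            summaries=["loss", "gradient_norm"])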
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.to_float(global_step)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
Implement adaptive gradient as presented in section 3.2.1 of
https://arxiv.org/abs/1412.1602.
Keeps a moving average of the mean and std of the log(norm) of the gradient.
if the norm exceeds `exp(mean + std_factor*std)`, all gradients are rescaled
such that the global norm becomes `exp(mean)`.
Args:
std_factor: Python scaler (or tensor).
`max_norm = exp(mean + std_factor*std)`
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
report_summary: If `True`, will add histogram summaries of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
name: The name for this operation is used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm,
array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
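# Example usage (sketch added for illustration): the returned closure can be passed as the
# `clip_gradients` argument of `optimize_loss` in place of a fixed float threshold:
#
#   train_op = optimize_loss(loss, global_step, learning_rate=0.01, optimizer="SGD",
#                            clip_gradients=adaptive_clipping_fn(std_factor=2.,
#                                                                global_step=global_step))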
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
|
the-stack_0_23369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for checks_test_lib."""
from grr.lib import flags
from grr.lib import parsers
from grr.lib import test_lib
from grr.lib.checks import checks
from grr.lib.checks import checks_test_lib
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
class CheckHelperTests(checks_test_lib.HostCheckTest):
"""Tests for common Check Helper methods."""
def testAssertCheckUndetected(self):
"""Tests for the asertCheckUndetected() method."""
anomaly = {
"finding": ["Adware 2.1.1 is installed"],
"symptom": "Found: Malicious software.",
"type": "ANALYSIS_ANOMALY"
}
# Simple no anomaly case.
no_anomaly = {"SW-CHECK": checks.CheckResult(check_id="SW-CHECK")}
self.assertCheckUndetected("SW-CHECK", no_anomaly)
# The case were there is an anomaly in the results, just not the check
# we are looking for.
other_anomaly = {
"SW-CHECK":
checks.CheckResult(check_id="SW-CHECK"),
"OTHER":
checks.CheckResult(
check_id="OTHER", anomaly=rdf_anomaly.Anomaly(**anomaly))
}
self.assertCheckUndetected("SW-CHECK", other_anomaly)
# Check the simple failure case works.
has_anomaly = {
"SW-CHECK":
checks.CheckResult(
check_id="SW-CHECK", anomaly=rdf_anomaly.Anomaly(**anomaly))
}
self.assertRaises(AssertionError, self.assertCheckUndetected, "SW-CHECK",
has_anomaly)
def testAssertRanChecks(self):
"""Tests for the assertRanChecks() method."""
no_checks = {}
some_checks = {"EXISTS": checks.CheckResult(check_id="EXISTS")}
self.assertRanChecks(["EXISTS"], some_checks)
self.assertRaises(AssertionError, self.assertRanChecks, ["EXISTS"],
no_checks)
self.assertRaises(AssertionError, self.assertRanChecks, ["FOOBAR"],
some_checks)
def testAssertChecksNotRun(self):
"""Tests for the assertChecksNotRun() method."""
no_checks = {}
some_checks = {"EXISTS": checks.CheckResult(check_id="EXISTS")}
self.assertChecksNotRun(["FOOBAR"], no_checks)
self.assertChecksNotRun(["FOO", "BAR"], no_checks)
self.assertChecksNotRun(["FOOBAR"], some_checks)
self.assertChecksNotRun(["FOO", "BAR"], some_checks)
self.assertRaises(AssertionError, self.assertChecksNotRun, ["EXISTS"],
some_checks)
self.assertRaises(AssertionError, self.assertChecksNotRun,
["FOO", "EXISTS", "BAR"], some_checks)
def testAssertCheckDetectedAnom(self):
"""Tests for the assertCheckDetectedAnom() method."""
# Check we fail when our checkid isn't in the results.
no_checks = {}
self.assertRaises(
AssertionError,
self.assertCheckDetectedAnom,
"UNICORN",
no_checks,
sym=None,
findings=None)
# Check we fail when our checkid is in the results but hasn't
# produced an anomaly.
passing_checks = {"EXISTS": checks.CheckResult(check_id="EXISTS")}
self.assertRaises(
AssertionError,
self.assertCheckDetectedAnom,
"EXISTS",
passing_checks,
sym=None,
findings=None)
# On to a 'successful' cases.
anomaly = {
"finding": ["Finding"],
"symptom": "Found: An issue.",
"type": "ANALYSIS_ANOMALY"
}
failing_checks = {
"EXISTS":
checks.CheckResult(
check_id="EXISTS", anomaly=rdf_anomaly.Anomaly(**anomaly))
}
# Check we pass when our check produces an anomaly and we don't care
# about the details.
self.assertCheckDetectedAnom(
"EXISTS", failing_checks, sym=None, findings=None)
# When we do care only about the 'symptom'.
self.assertCheckDetectedAnom(
"EXISTS", failing_checks, sym="Found: An issue.", findings=None)
# And when we also care about the findings.
self.assertCheckDetectedAnom(
"EXISTS", failing_checks, sym="Found: An issue.", findings=["Finding"])
# And check we match substrings of a 'finding'.
self.assertCheckDetectedAnom(
"EXISTS", failing_checks, sym="Found: An issue.", findings=["Fin"])
# Check we complain when the symptom doesn't match.
self.assertRaises(
AssertionError,
self.assertCheckDetectedAnom,
"EXISTS",
failing_checks,
sym="wrong symptom",
findings=None)
# Check we complain when the symptom matches but the findings don't.
self.assertRaises(
AssertionError,
self.assertCheckDetectedAnom,
"EXISTS",
failing_checks,
sym="Found: An issue.",
findings=["Not found"])
# Lastly, if there is a finding in the anomaly we didn't expect, we consider
# that a problem.
self.assertRaises(
AssertionError,
self.assertCheckDetectedAnom,
"EXISTS",
failing_checks,
sym="Found: An issue.",
findings=[])
def testGenProcessData(self):
"""Test for the GenProcessData() method."""
# Trivial empty case.
art_name = "ListProcessesGrr"
context = "RAW"
result = self.GenProcessData([])
self.assertTrue("KnowledgeBase" in result)
self.assertTrue(art_name in result)
self.assertDictEqual(self.SetArtifactData(), result[art_name])
# Now with data.
result = self.GenProcessData([("proc1", 1, ["/bin/foo"]), ("proc2", 2,
["/bin/bar"])])
self.assertEquals("proc1", result[art_name][context][0].name)
self.assertEquals(1, result[art_name][context][0].pid)
self.assertEquals(["/bin/foo"], result[art_name][context][0].cmdline)
self.assertEquals("proc2", result[art_name][context][1].name)
self.assertEquals(2, result[art_name][context][1].pid)
self.assertEquals(["/bin/bar"], result[art_name][context][1].cmdline)
def testGenFileData(self):
"""Test for the GenFileData() method."""
# Need a parser
self.assertRaises(test_lib.Error, self.GenFileData, "EMPTY", [])
# Trivial empty case.
parser = parsers.FileParser()
result = self.GenFileData("EMPTY", [], parser)
self.assertTrue("KnowledgeBase" in result)
self.assertTrue("EMPTY" in result)
self.assertDictEqual(self.SetArtifactData(), result["EMPTY"])
# Now with data.
result = self.GenFileData("FILES",
{"/tmp/foo": """blah""",
"/tmp/bar": """meh"""}, parser)
self.assertTrue("FILES" in result)
# No parser information should be generated.
self.assertEquals([], result["FILES"]["PARSER"])
# Two stat entries under raw (stat entries should exist)
self.assertEquals(2, len(result["FILES"]["RAW"]))
# Walk the result till we find the item we want.
# This is to avoid a flakey test.
statentry = None
for r in result["FILES"]["RAW"]:
if r.pathspec.path == "/tmp/bar":
statentry = r
self.assertIsInstance(statentry, rdf_client.StatEntry)
self.assertEquals(33188, statentry.st_mode)
def testGenSysVInitData(self):
"""Test for the GenSysVInitData() method."""
# Trivial empty case.
result = self.GenSysVInitData([])
self.assertTrue("KnowledgeBase" in result)
self.assertTrue("LinuxServices" in result)
self.assertDictEqual(self.SetArtifactData(), result["LinuxServices"])
# Now with data.
result = self.GenSysVInitData(["/etc/rc2.d/S99testing"])
self.assertTrue("LinuxServices" in result)
self.assertEquals(1, len(result["LinuxServices"]["PARSER"]))
result = result["LinuxServices"]["PARSER"][0]
self.assertEquals("testing", result.name)
self.assertEquals([2], result.start_on)
self.assertTrue(result.starts)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_0_23371 | from .models import *
from django.shortcuts import render
from django.core.handlers.wsgi import WSGIRequest
from django.http import JsonResponse, HttpResponse
from django.db import connection
from .utilities import reconstruct_params, post_request_to_dict_slicer, values_from_dict_by_keys, smart_int, \
null_check, reconstruct_args
import datetime
from .forms import UserForm
from django.shortcuts import redirect
from .views_kernel import add_student
from django.db.utils import IntegrityError
import sys
import os
import cspc.settings
import cspcapp.constants
from .views_kernel import superuser_only
# Configuration Block
DELETE_RENAMING = {'payment': 'contract_payment'}
# Edit Block
@superuser_only
def data_edit(request: WSGIRequest, object_type: str) -> HttpResponse:
if object_type in RELINKS_FOR_EDIT:
object_type = RELINKS_FOR_EDIT[object_type]
config = MODEL_TYPES_DICT[object_type]
params = post_request_to_dict_slicer(request.POST)
editing_object = config.type_name.objects.get(pk=request.POST['id'])
del params['csrfmiddlewaretoken']
del params['id']
reconstruct_args(params=params, to_date=config.to_date, date_to_timestamp=config.date_to_timestamp,
to_time=config.to_time)
for i, j in params.items():
rsetattr(editing_object, i, j)
if hasattr(editing_object, 'change_user'):
editing_object.change_user = request.user
editing_object.save()
return JsonResponse({})
# Add Block
@superuser_only
def course_detail_add(request: WSGIRequest) -> JsonResponse:
# data = dict(request.POST)
CourseElementDefiniteClass.objects.create(class_dt=datetime.date(int(request.POST['class_dt_year']),
int(request.POST['class_dt_month']),
int(request.POST['class_dt_day'])),
start_tm=datetime.time(int(request.POST['start_tm_hour']),
int(request.POST['start_tm_minute'])),
end_tm=datetime.time(int(request.POST['end_tm_hour']),
int(request.POST['end_tm_minute'])),
course_element_id=int(request.POST['course_element_id']))
return JsonResponse({})
# Delete BLock
@superuser_only
def object_delete(request: WSGIRequest, object_type: str) -> HttpResponse:
id = request.POST['id']
try:
if object_type == 'contract':
contract = Contract.objects.get(pk=id)
date = request.POST['delete_date']
            if date == '':
date = datetime.datetime.now().date()
ContractTermination.objects.create(contract=contract, termination_dt=date,
termination_reason_txt=request.POST['delete_reason'])
contract.delete(user=request.user)
else:
MODEL_TYPES_DICT[object_type].type_name.objects.get(pk=id).delete(user=request.user)
except IntegrityError:
exc_type, value, exc_traceback = sys.exc_info()
return JsonResponse({
'success': False,
            'error_msg': f"Error: there are dependencies that cannot be deleted automatically\n\n\n{value}"
})
except HasRelatedObjectsException as error:
        res = 'Error: there are dependencies that cannot be deleted automatically:'
for i in error.relations_set:
res += f"\n{i[0].ru_localization} -> {i[1].ru_localization}"
return JsonResponse({'success': False, 'error_msg': res})
return JsonResponse({'success': True})
@superuser_only
def new_user(request: WSGIRequest) -> JsonResponse:
_new_user = User.objects.create_user(username=request.POST['username'], password=request.POST['password'],
first_name=request.POST['name'], last_name=request.POST['surname'])
new_person = Person(person_surname_txt=request.POST['surname'],
person_name_txt=request.POST['name'],
person_father_name_txt=request.POST['father_name'],
birth_dt=datetime.date(int(request.POST['person.birth_dt__year']),
int(request.POST['person.birth_dt__month']),
int(request.POST['person.birth_dt__day'])))
new_person.save()
conn = AuthUserXPerson(auth_user=_new_user, person=new_person)
conn.save()
return JsonResponse({})
@superuser_only
def get_teacher_users(request: WSGIRequest) -> JsonResponse:
return JsonResponse([{
'surname': i.person.person_surname_txt,
'name': i.person.person_name_txt,
'father_name': i.person.person_father_name_txt,
'username': i.auth_user.username,
'id': i.person.id
} for i in AuthUserXPerson.objects.all()])
@superuser_only
def submit_registration_form(request: WSGIRequest) -> JsonResponse:
template = RegistrationRequest.objects.get(pk=request.POST['id'])
_new_user = User.objects.create_user(username=template.username, password=template.password)
_new_user.save()
new_person = Person(person_surname_txt=template.person_surname_txt,
person_name_txt=template.person_name_txt,
person_father_name_txt=template.person_father_name_txt,
birth_dt=template.birth_dt, change_user=request.user)
new_person.save()
AuthUserXPerson.objects.create(auth_user=_new_user, person=new_person)
return JsonResponse({})
@superuser_only
def submit_student_form(request: WSGIRequest) -> JsonResponse:
try:
req = StudentRequest.objects.get(pk=request.POST['id'])
now = datetime.datetime.now().date()
ed_year = now.year - req.student_class
if now.month > 6:
ed_year += 1
except Exception:
        return JsonResponse({'result': False, 'error': 'Error processing the student class'})
try:
add_student(request.user,
document_no=[req.student_document_no, req.payer_document_no],
document_series=[req.student_document_series, req.payer_document_series],
person_surname_txt=[req.student_surname_txt, req.payer_surname_txt],
person_name_txt=[req.student_name_txt, req.payer_name_txt],
person_father_name_txt=[req.student_father_name_txt, req.payer_father_name_txt],
authority_no=[req.student_authority_no, req.payer_authority_no],
authority_txt=[req.student_authority_txt, req.payer_authority_txt],
issue_dt=[req.student_issue_dt, req.payer_issue_dt],
document_type_txt=[req.student_document_type_txt, req.payer_document_type_txt],
region_cd=[req.student_region_cd, req.payer_region_cd],
area_txt=[req.student_area_txt, req.payer_area_txt],
city_txt=[req.student_city_txt, req.payer_city_txt],
street_txt=[req.student_street_txt, req.payer_street_txt],
house_txt=[req.student_house_txt, req.payer_house_txt],
building_no=[req.student_building_no, req.payer_building_no],
structure_no=[req.student_structure_no, req.payer_structure_no],
flat_nm=[req.student_flat_nm, req.payer_flat_nm],
birth_dt=[req.student_birth_dt],
education_dt=[datetime.date(ed_year, 9, 1)],
school_name_txt=[req.student_school_name_txt],
liter=[req.student_liter],
student_phone_no=[req.student_phone_no],
payer_phone_no=[req.payer_phone_no],
payer_inn_no=[req.payer_inn_no],
course_element=req.courses.split(' ')
)
req.delete()
return JsonResponse({'result': True})
except Exception:
        return JsonResponse({'result': False, 'error': 'Error processing the form fields'})
def search_dates_in_json(data: dict):
    """Recursively collapse '<field>__year', '__month' and '__day' keys of a nested dict into datetime.date values."""
dates_keys = {}
for i, j in data.items():
if type(j) is not dict:
divided = i.split('__')
if len(divided) == 2:
if divided[0] not in dates_keys:
dates_keys[divided[0]] = {divided[1]: j}
else:
dates_keys[divided[0]][divided[1]] = j
else:
search_dates_in_json(j)
for i, j in dates_keys.items():
try:
date = datetime.date(int(j['year']), int(j['month']), int(j['day']))
del data[i + '__year']
del data[i + '__month']
del data[i + '__day']
data[i] = date
except Exception:
pass
def generate_object(data: dict, object_elem):
    """Recursively build and save related model instances from a nested dict, then save object_elem itself."""
for i, j in data.items():
if type(j) is dict:
elem = (object_elem._meta.get_field(i).related_model)()
generate_object(j, elem)
data[i] = elem
setattr(object_elem, i, data[i])
object_elem.save()
@superuser_only
def course_element_add(request: WSGIRequest) -> JsonResponse:
data = dict(request.POST)
course_element = CourseElement.objects.create(course_id=request.POST['course_id'],
teacher_person_id=request.POST['teacher_id'])
course_element.save()
course_element_id = course_element.pk
for k in range(0, 7):
if data['course_class_start_hour'][k] != '' and data['course_class_start_minute'][k] != '' and \
data['course_class_end_hour'][k] != '' and data['course_class_end_minute'][k] != '':
CourseClass(start_tm=datetime.time(int(data['course_class_start_hour'][k]),
int(data['course_class_start_minute'][k])),
end_tm=datetime.time(int(data['course_class_end_hour'][k]),
int(data['course_class_end_minute'][k])),
week_day_txt=str(k), course_element_id=course_element_id).save()
return JsonResponse({
'html': render(request, 'models/course_element/main.html', {'object': course_element}).content.decode('utf-8')
})
def add_object(request: WSGIRequest, object_type) -> JsonResponse:
elem = MODEL_TYPES_DICT[object_type].type_name()
args_dict = {}
args = {i: j[0] for i, j in dict(request.POST).items()}
for i, j in dict(request.POST).items():
path = i.split('.')
last_elem = args_dict
for k in path[:-1]:
if k not in last_elem:
last_elem[k] = {}
last_elem = last_elem[k]
last_elem[path[-1]] = j[0]
search_dates_in_json(args_dict)
if object_type == 'contract':
        flat_nm = args_dict['student_address']['flat_nm']
        args_dict['student_address']['flat_nm'] = None if flat_nm == '' else int(flat_nm)
        flat_nm = args_dict['payer_address']['flat_nm']
        args_dict['payer_address']['flat_nm'] = None if flat_nm == '' else int(flat_nm)
if args_dict['student_document']['document_type_txt'] == '':
del args_dict['student_document']
del args_dict['student_address']
generate_object(args_dict, elem)
return JsonResponse({
'html': render(request, 'models/' + object_type +'/main.html', {'object': elem}).content.decode('utf-8')
})
def add_course_class(request: WSGIRequest) -> JsonResponse:
try:
date = datetime.date(int(request.POST['class_dt_year']), int(request.POST['class_dt_month']), int(request.POST['class_dt_day']))
except Exception:
        return JsonResponse({'result': False, 'error': 'Invalid date format'})
try:
start_tm = datetime.time(int(request.POST['start_tm_hour']), int(request.POST['start_tm_minute']))
except Exception:
        return JsonResponse({'result': False, 'error': 'Invalid start time format'})
try:
end_tm = datetime.time(int(request.POST['end_tm_hour']), int(request.POST['end_tm_minute']))
except Exception:
        return JsonResponse({'result': False, 'error': 'Invalid end time format'})
try:
ce = CourseElement.objects.get(pk=request.POST['course_element_id'])
if ce.teacher_person.authuserxperson.auth_user != request.user:
            return JsonResponse({'result': False, 'error': 'SECURITY ERROR: THIS ELEMENT DOES NOT BELONG TO YOU'})
except Exception:
        return JsonResponse({'result': False, 'error': 'SECURITY ERROR: ELEMENT DOES NOT EXIST'})
# try:
#
# except Exception:
# return JsonResponse({'html': render(request, 'models/course_class/main.html', {'object': elem}).content.decode('utf-8')})
elem = CourseElementDefiniteClass(course_element_id=request.POST['course_element_id'], class_dt=date,
start_tm=start_tm, end_tm=end_tm)
elem.save()
return JsonResponse({
'result': True,
'html': render(request, 'models/course_class/main.html', {'object': elem}).content.decode('utf-8')
})
def edit_course_class(request: WSGIRequest) -> JsonResponse:
try:
date = datetime.date(int(request.POST['class_dt_year']), int(request.POST['class_dt_month']), int(request.POST['class_dt_day']))
except Exception:
        return JsonResponse({'result': False, 'error': 'Invalid date format'})
try:
start_tm = datetime.time(int(request.POST['start_tm_hour']), int(request.POST['start_tm_minute']))
except Exception:
        return JsonResponse({'result': False, 'error': 'Invalid start time format'})
try:
end_tm = datetime.time(int(request.POST['end_tm_hour']), int(request.POST['end_tm_minute']))
except Exception:
        return JsonResponse({'result': False, 'error': 'Invalid end time format'})
try:
ce = CourseElementDefiniteClass.objects.get(pk=request.POST['id'])
if ce.course_element.teacher_person.authuserxperson.auth_user != request.user:
            return JsonResponse({'result': False, 'error': 'SECURITY ERROR: THIS ELEMENT DOES NOT BELONG TO YOU'})
ce.class_dt = date
ce.start_tm = start_tm
ce.end_tm = end_tm
ce.save()
return JsonResponse({
'result': True
})
except Exception:
        return JsonResponse({'result': False, 'error': 'SECURITY ERROR: ELEMENT DOES NOT EXIST'})
def delete_course_class(request: WSGIRequest) -> JsonResponse:
try:
ce = CourseElementDefiniteClass.objects.get(pk=request.POST['id'])
if ce.course_element.teacher_person.authuserxperson.auth_user != request.user:
            return JsonResponse({'result': False, 'error': 'SECURITY ERROR: THIS ELEMENT DOES NOT BELONG TO YOU'})
ce.delete()
return JsonResponse({
'result': True
})
except Exception:
        return JsonResponse({'result': False, 'error': 'SECURITY ERROR: ELEMENT DOES NOT EXIST'})
def get_session_data(request: WSGIRequest) -> JsonResponse:
models_dict = {}
parent_dir = os.path.join(os.path.join(cspc.settings.BASE_DIR, 'templates'), 'models')
for subdir in next(os.walk(parent_dir))[1]:
child_1_dir = os.path.join(parent_dir, subdir)
models_dict[subdir] = {file[:-5]: open(os.path.join(child_1_dir, file), 'r').read()
for file in next(os.walk(child_1_dir))[2]}
return JsonResponse({
'models': models_dict,
'statics': {
'REGIONS_DICT': REGIONS_DICT,
'PAYMENT_TYPES': cspcapp.constants.PAYMENT_TYPES,
'DAYS_OF_WEEK': cspcapp.constants.DAYS_OF_WEEK,
'courses': [i for i in Course.objects.values()],
'course_elements': [i for i in CourseElement.objects.values()],
'teachers': [i for i in AuthUserXPerson.objects.values()],
'user': {'pk': request.user.pk, 'username': request.user.username, 'is_superuser': request.user.is_superuser}
}
})
@superuser_only
def change_user_password(request: WSGIRequest) -> JsonResponse:
if request.POST:
if 'user_id' not in request.POST:
            return JsonResponse({'result': False, 'error': 'missing user id'})
        if 'new_password' not in request.POST or request.POST['new_password'] == '':
            return JsonResponse({'result': False, 'error': 'empty password'})
u = User.objects.get(pk=request.POST['user_id'])
u.set_password(request.POST['new_password'])
u.save()
return JsonResponse({'result': True})
else:
        return JsonResponse({'result': False, 'error': 'missing request body'})
|
the-stack_0_23373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
import itertools
import copy
import re
from alex.components.slu.da import DialogueAct
from alex.utils.config import load_as_module
from alex.components.nlg.tectotpl.core.run import Scenario
from alex.components.nlg.exceptions import TemplateNLGException
from alex.components.dm.ontology import Ontology
class AbstractTemplateNLG(object):
"""\
Base abstract class for template-filling generators, providing the
routines for template loading and selection.
The generation (i.e. template filling) is left to the derived classes.
It implements numerous backoff strategies:
1) it matches the exactly the input dialogue against the templates
2) if it cannot find exact match, then it tries to find a generic template (slot-independent)
3) if it cannot find a generic template, the it tries to compose
the template from templates for individual dialogue act items
"""
def __init__(self, cfg):
"""\
Constructor, just save a link to the configuration.
"""
self.cfg = cfg
# this will save the last utterance
self.last_utterance = u""
# setup the composing strategy
self.compose_utterance = self.compose_utterance_greedy
self.compose_greedy_lookahead = 5
if 'NLG' in self.cfg and 'TemplateCompose' in self.cfg['NLG']:
compose_setting = \
                self.cfg['NLG']['TemplateCompose'].lower().strip()
if compose_setting.startswith('greedy'):
self.compose_utterance = self.compose_utterance_greedy
self.compose_greedy_lookahead = \
int(re.search(r'\d+', compose_setting).group(0))
elif compose_setting == 'single':
self.compose_utterance = self.compose_utterance_single
def load_templates(self, file_name):
"""\
Load templates from an external file, which is assumed to be a
Python source which defines the variable 'templates' as a dictionary
containing stringified dialog acts as keys and (lists of) templates
as values.
"""
try:
templates = load_as_module(file_name, force=True).templates
# normalize the templates
self.templates = {}
# generalised templates
self.gtemplates = {}
for k, v in templates.iteritems():
da = DialogueAct(k)
# k.sort()
self.templates[unicode(da)] = v
self.gtemplates[unicode(self.get_generic_da(da))] = (da, v)
except Exception as e:
raise TemplateNLGException('No templates loaded from %s -- %s!' % (file_name, e))
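    # Illustrative shape of such a template file (added sketch; the dialogue acts and slot
    # names below are made-up examples, not taken from an actual domain):
    #
    #   templates = {
    #       'hello()': (u"Hello.", u"Hi."),
    #       'inform(food="{food}")': u"The restaurant serves {food} food.",
    #   }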
def get_generic_da(self, da):
"""\
        Given a dialogue act, replace slot values that are already generic
        (i.e. start with '{') with the '{slot_name}' placeholder.
"""
# copy the instance
da = copy.deepcopy(da)
# find matching slots & values
for dai in da:
if dai.value and dai.value.startswith('{'):
# there is match, make it generic
dai.value = "{%s}" % dai.name
return da
def get_generic_da_given_svs(self, da, svs):
"""\
        Given a dialogue act and a list of (slot, value) pairs, substitute
        the values of the matching slots with the '{slot_name}' placeholder.
"""
# copy the instance
da = copy.deepcopy(da)
# find matching slots & values
for name, value in svs:
for dai in da:
if dai.name == name and dai.value == value:
# there is match, make it generic
dai.value = "{%s}" % dai.name
return da
def match_generic_templates(self, da, svs):
"""\
Find a matching template for a dialogue act using substitutions
for slot values.
Returns a matching template and a dialogue act where values of some
of the slots are substituted with a generic value.
"""
tpl = None
# try to find increasingly generic templates
# limit the complexity of the search
if len(svs) == 0:
rng = []
elif len(svs) == 1:
rng = [1]
elif len(svs) == 2:
rng = [1, 2]
else:
rng = [1, len(svs) - 1, len(svs)]
for r in rng:
for cmb in itertools.combinations(svs, r):
generic_da = self.get_generic_da_given_svs(da, cmb)
try:
gda, tpls = self.gtemplates[unicode(generic_da)]
tpl = self.random_select(tpls)
except KeyError:
continue
return tpl, gda
# I did not find anything
raise TemplateNLGException("No match with generic templates.")
def random_select(self, tpl):
"""\
Randomly select alternative templates for generation.
The selection process is modeled by an embedded list structure
(a tree-like structure).
In the first level, the algorithm selects one of N.
In the second level, for every item it selects one of M,
and joins them together.
This continues toward the leaves which must be non-list objects.
There are the following random selection options (only the first
three):
(1)
{
'hello()' : u"Hello",
}
This will return the "Hello" string.
(2)
{
'hello()' : (u"Hello",
u"Hi",
),
}
This will return one of the "Hello" or "Hi" strings.
        (3)
{
'hello()' : (
[
(u"Hello.",
u"Hi.",
)
(u"How are you doing?",
u"Welcome".,
),
u"Speak!",
],
u"Hi my friend."
),
}
This will return one of the following strings:
"Hello. How are you doing? Speak!"
"Hi. How are you doing? Speak!"
"Hello. Welcome. Speak!"
"Hi. Welcome. Speak!"
"Hi my friend."
"""
if isinstance(tpl, basestring):
return tpl
elif isinstance(tpl, tuple):
tpl_rc_or = random.choice(tpl)
if isinstance(tpl_rc_or, basestring):
return tpl_rc_or
elif isinstance(tpl_rc_or, list):
tpl_rc_and = []
for t in tpl_rc_or:
tpl_rc_and.append(self.random_select(t))
return u" ".join(tpl_rc_and).replace(u' ', u' ')
elif isinstance(tpl_rc_or, tuple):
raise TemplateNLGException("Unsupported generation type. " +
"At this level, the template" +
"cannot be a tuple: template = %s" %
unicode(tpl))
elif isinstance(tpl, list):
raise TemplateNLGException("Unsupported generation type. " +
"At this level, the template cannot " +
"be a list: template = %s" %
unicode(tpl))
else:
raise TemplateNLGException("Unsupported generation type.")
def match_and_fill_generic(self, da, svs):
"""\
Match a generic template and fill in the proper values for the slots
which were substituted by a generic value.
Will return the output text with the proper values filled in if a
generic template can be found; will throw a TemplateNLGException
otherwise.
"""
# find a generic template
tpls, mda = self.match_generic_templates(da, svs)
tpl = self.random_select(tpls)
svs_mda = mda.get_slots_and_values()
# prepare a list of generic values to be filled in
svsx = []
for (slot_orig, val_orig), (_, val_generic) in zip(svs, svs_mda):
if val_generic.startswith('{'):
svsx.append([val_generic[1:-1], val_orig])
else:
svsx.append([slot_orig, val_orig])
# return with generic values filled in
return self.fill_in_template(tpl, svsx)
def generate(self, da):
"""\
Generate the natural text output for the given dialogue act.
First, try to find an exact match with no variables to fill in.
Then try to find a relaxed match of a more generic template and
fill in the actual values of the variables.
"""
utterance = ''
try:
if unicode(da) == 'irepeat()':
# just return last utterance
utterance = self.last_utterance
else:
# try to return exact match
utterance = self.random_select(self.templates[unicode(da)])
except KeyError:
# try to find a relaxed match
svs = da.get_slots_and_values()
try:
utterance = self.match_and_fill_generic(da, svs)
except TemplateNLGException:
# try to find a template for each dialogue act item and concatenate them
try:
utterance = self.compose_utterance(da)
except TemplateNLGException:
# nothing to do, I must backoff
utterance = self.backoff(da)
if re.match(r'^(inform|i?confirm|request|hello)', unicode(da)):
self.last_utterance = utterance
return utterance
def compose_utterance_single(self, da):
"""\
Compose an utterance from templates for single dialogue act items.
Returns the composed utterance.
"""
composed_utt = []
# try to find a template for each single dialogue act item
for dai in da:
try:
# look for an exact match
dai_utt = self.random_select(self.templates[unicode(dai)])
except KeyError:
# try to find a relaxed match
dax = DialogueAct()
dax.append(dai)
svsx = dax.get_slots_and_values()
try:
dai_utt = self.match_and_fill_generic(dax, svsx)
except TemplateNLGException:
dai_utt = unicode(dai)
composed_utt.append(dai_utt)
return ' '.join(composed_utt)
def compose_utterance_greedy(self, da):
"""\
Compose an utterance from templates by iteratively looking for
the longest (up to self.compose_greedy_lookahead) matching
sub-utterance at the current position in the DA.
Returns the composed utterance.
"""
composed_utt = []
sub_start = 0
# pass through the dialogue act
while sub_start < len(da):
dax_utt = None
dax_len = None
# greedily look for the longest template that will cover the next
# dialogue act items (try longer templates first, from maximum
# length given in settings down to 1).
for sub_len in xrange(self.compose_greedy_lookahead, 0, -1):
dax = DialogueAct()
dax.extend(da[sub_start:sub_start + sub_len])
try:
# try to find an exact match
dax_utt = self.random_select(self.templates[unicode(dax)])
dax_len = sub_len
break
except KeyError:
# try to find a relaxed match
svsx = dax.get_slots_and_values()
try:
dax_utt = self.match_and_fill_generic(dax, svsx)
dax_len = sub_len
break
except TemplateNLGException:
# nothing found: look for shorter templates
continue
if dax_utt is None: # dummy backoff
dax_utt = unicode(da[sub_start])
dax_len = 1
composed_utt.append(dax_utt)
sub_start += dax_len
return ' '.join(composed_utt)
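    # Illustrative walk-through (added sketch with made-up templates): for a DA such as
    # inform(food="chinese")&inform(area="centre")&request(phone), the loop first tries to
    # cover all three items with a single template, then the first two (e.g. one keyed by
    # 'inform(food="{food}")&inform(area="{area}")'), fills it in, and finally looks for a
    # template covering the remaining request(phone) item on its own.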
def fill_in_template(self, tpl, svs):
"""\
Fill in the given slot values of a dialogue act into the given
template. This should be implemented in derived classes.
"""
raise NotImplementedError()
def backoff(self, da):
"""\
Provide an alternative NLG template for the dialogue
output which is not covered in the templates.
This serves as a backoff solution.
This should be implemented in derived classes.
"""
raise NotImplementedError()
class TemplateNLG(AbstractTemplateNLG):
"""\
A simple text-replacement template NLG implementation with the
ability to resort to a back-off system if no appropriate template is
found.
"""
def __init__(self, cfg):
super(TemplateNLG, self).__init__(cfg)
# load templates
if 'model' in self.cfg['NLG']['Template']:
self.load_templates(self.cfg['NLG']['Template']['model'])
# load ontology
self.ontology = Ontology()
if 'ontology' in self.cfg['NLG']['Template']:
self.ontology.load(cfg['NLG']['Template']['ontology'])
# initialize pre- and post-processing
self.preprocessing = None
self.postprocessing = None
if 'preprocessing_cls' in self.cfg['NLG']['Template']:
if 'preprocessing_config' in self.cfg['NLG']['Template']:
self.preprocessing = self.cfg['NLG']['Template']['preprocessing_cls'](
self.ontology, self.cfg['NLG']['Template']['preprocessing_config'])
else:
self.preprocessing = self.cfg['NLG']['Template']['preprocessing_cls'](self.ontology)
if 'postprocessing_cls' in self.cfg['NLG']['Template']:
self.postprocessing = self.cfg['NLG']['Template']['postprocessing_cls']()
def fill_in_template(self, tpl, svs):
"""\
Simple text replacement template filling.
Applies template NLG pre- and postprocessing, if applicable.
"""
svs_dict = dict(svs)
if self.preprocessing is not None:
tpl, svs_dict = self.preprocessing.preprocess(tpl, svs_dict)
out_text = tpl.format(**svs_dict)
if self.postprocessing is not None:
return self.postprocessing.postprocess(out_text)
return out_text
class TemplateNLGPreprocessing(object):
"""Base class for template NLG preprocessing, handles preprocessing of the
values to be filled into a template.
This base class provides no functionality, it just defines an interface
for derived language-specific and/or domain-specific classes.
"""
def __init__(self, ontology):
self.ontology = ontology
def preprocess(self, svs_dict):
raise NotImplementedError()
class TemplateNLGPostprocessing(object):
"""Base class for template NLG postprocessing, handles postprocessing of the
text resulting from filling in a template.
This base class provides no functionality, it just defines an interface
for derived language-specific and/or domain-specific classes.
"""
def __init__(self):
pass
def postprocess(self, nlg_text):
raise NotImplementedError()
class TectoTemplateNLG(AbstractTemplateNLG):
"""\
Template generation using tecto-trees and NLG rules.
"""
def __init__(self, cfg):
"""\
Initialization, checking configuration, loading
templates and NLG rules.
"""
super(TectoTemplateNLG, self).__init__(cfg)
# check that the configuration contains everything we need
if not 'NLG' in self.cfg or not 'TectoTemplate' in self.cfg['NLG']:
raise TemplateNLGException('No configuration found!')
mycfg = self.cfg['NLG']['TectoTemplate']
if not 'model' in mycfg or not 'scenario' in mycfg or \
not 'data_dir' in mycfg:
raise TemplateNLGException('NLG scenario, data directory ' +
'and templates must be defined!')
# load templates
self.load_templates(mycfg['model'])
# load NLG system
self.nlg_rules = Scenario(mycfg)
self.nlg_rules.load_blocks()
def fill_in_template(self, tpl, svs):
"""\
Filling in tecto-templates, i.e. filling-in strings to templates
and using rules to generate the result.
"""
tpl = unicode(tpl)
filled_tpl = tpl.format(**dict(svs))
return self.nlg_rules.apply_to(filled_tpl)
|
the-stack_0_23375 | import json
import logging
import time
from datetime import datetime, timedelta
import requests
__author__ = "Mark Ruys"
__copyright__ = "Copyright 2017, Mark Ruys"
__license__ = "MIT"
__email__ = "[email protected]"
class GoodWeApi:
def __init__(self, system_id, account, password):
self.system_id = system_id
self.account = account
self.password = password
self.token = '{"version":"v3.1","client":"ios","language":"en"}'
self.global_url = 'https://semsportal.com/api/'
self.base_url = self.global_url
def statusText(self, status):
labels = { -1 : 'Offline', 0 : 'Waiting', 1 : 'Normal', 2: 'Fault' }
return labels[status] if status in labels else 'Unknown'
def calcPvVoltage(self, data):
pv_voltages = [
data['vpv' + str(i)]
for i in range(1, 5)
if 'vpv' + str(i) in data
if data['vpv' + str(i)]
if data['vpv' + str(i)] < 6553
]
return round(sum(pv_voltages), 1)
def getCurrentReadings(self):
''' Download the most recent readings from the GoodWe API. '''
payload = {
'powerStationId' : self.system_id
}
data = self.call("v2/PowerStation/GetMonitorDetailByPowerstationId", payload)
hasPowerflow = data['hasPowerflow']
hasEnergeStatisticsCharts = data['hasEnergeStatisticsCharts']
no_meter = {
'status' : 'Unknown',
'itemp' : 0,
'pgrid_w' : 0,
'etotal_kwh' : 0,
'grid_voltage' : 0,
'pv_voltage' : 0,
'latitude' : data['info'].get('latitude'),
'longitude' : data['info'].get('longitude'),
'eday_kwh': 0,
'consumptionOfLoad' : 'None',
'load' : 'None'
}
w_powerflow = {
'consumptionOfLoad' : float(data['energeStatisticsCharts'].get('consumptionOfLoad', 0.0)),
'load' : float(self.parseValue(data['powerflow'].get('load', 0), ' (W) '))
}
w_statistics = {
'eday_kwh' : float(data['energeStatisticsCharts'].get('sum', 0)),
}
if hasEnergeStatisticsCharts:
result = { **no_meter, **w_statistics }
if hasPowerflow:
result = { **result, **w_powerflow }
else:
result = no_meter
count = 0
for inverterData in data['inverter']:
status = self.statusText(inverterData['status'])
if status == 'Normal':
result['status'] = status
result['pgrid_w'] += inverterData['out_pac']
result['grid_voltage'] += self.parseValue(inverterData['output_voltage'], 'V')
                result['itemp'] += inverterData['tempperature']  # key name as returned by the SEMS API (note the spelling)
result['pv_voltage'] += self.calcPvVoltage(inverterData['d'])
count += 1
if not hasEnergeStatisticsCharts:
result['eday_kwh'] += inverterData['eday']
result['etotal_kwh'] += inverterData['etotal']
if count > 0:
# These values should not be the sum, but the average
result['grid_voltage'] /= count
result['pv_voltage'] /= count
elif len(data['inverter']) > 0:
# We have no online inverters, then just pick the first
inverterData = data['inverter'][0]
result['status'] = self.statusText(inverterData['status'])
if hasPowerflow:
result['pgrid_w'] = self.parseValue(data['powerflow'].get('pv'), '(W)')
else:
result['pgrid_w'] = inverterData['out_pac']
result['grid_voltage'] = self.parseValue(inverterData['output_voltage'], 'V')
result['pv_voltage'] = self.calcPvVoltage(inverterData['d'])
message = "{status}, {pgrid_w} W now, Load {load} W now, {eday_kwh} kWh today, {etotal_kwh} kWh all time, {consumptionOfLoad} kWh used today {grid_voltage} V grid, {pv_voltage} V PV, {itemp} C".format(**result)
if result['status'] == 'Normal' or result['status'] == 'Offline':
logging.info(message)
else:
logging.warning(message)
return result
def getActualKwh(self, date):
payload = {
'powerstation_id' : self.system_id,
'count' : 1,
'date' : date.strftime('%Y-%m-%d')
}
data = self.call("v2/PowerStationMonitor/GetPowerStationPowerAndIncomeByDay", payload)
if not data:
logging.warning("GetPowerStationPowerAndIncomeByDay missing data")
return 0
eday_kwh = 0
for day in data:
if day['d'] == date.strftime('%m/%d/%Y'):
eday_kwh = day['p']
return eday_kwh
def getLocation(self):
payload = {
'powerStationId' : self.system_id
}
data = self.call("v2/PowerStation/GetMonitorDetailByPowerstationId", payload)
if 'info' not in data:
logging.warning("GetMonitorDetailByPowerstationId returned bad data: " + str(data))
return {}
return {
'latitude' : data['info'].get('latitude'),
'longitude' : data['info'].get('longitude'),
}
def getDayPac(self, date):
payload = {
'id' : self.system_id,
'date' : date.strftime('%Y-%m-%d')
}
data = self.call("v2/PowerStationMonitor/GetPowerStationPacByDayForApp", payload)
if 'pacs' not in data:
logging.warning("GetPowerStationPacByDayForApp returned bad data: " + str(data))
return []
return data['pacs']
def getDayReadings(self, date):
result = self.getLocation()
pacs = self.getDayPac(date)
hours = 0
kwh = 0
result['entries'] = []
for sample in pacs:
parsed_date = datetime.strptime(sample['date'], "%m/%d/%Y %H:%M:%S")
next_hours = parsed_date.hour + parsed_date.minute / 60
pgrid_w = sample['pac']
if pgrid_w > 0:
kwh += pgrid_w / 1000 * (next_hours - hours)
result['entries'].append({
'dt' : parsed_date,
'pgrid_w': pgrid_w,
'eday_kwh': round(kwh, 3)
})
hours = next_hours
eday_kwh = self.getActualKwh(date)
        if eday_kwh > 0 and kwh > 0:
correction = eday_kwh / kwh
for sample in result['entries']:
sample['eday_kwh'] *= correction
return result
def call(self, url, payload):
for i in range(1, 4):
try:
headers = {
'User-Agent': 'SEMS Portal/3.1 (iPhone; iOS 13.5.1; Scale/2.00)',
'Token': self.token,
}
r = requests.post(self.base_url + url, headers=headers, data=payload, timeout=10)
r.raise_for_status()
data = r.json()
logging.debug(data)
try:
code = int(data['code'])
except ValueError:
raise Exception("Failed to call GoodWe API (no code)")
if code == 0 and data['data'] is not None:
return data['data']
elif code == 100001 or code == 100002:
loginPayload = {
'account': self.account,
'pwd': self.password,
}
r = requests.post(self.global_url + 'v2/Common/CrossLogin', headers=headers, data=loginPayload, timeout=10)
r.raise_for_status()
data = r.json()
if 'api' not in data:
raise Exception(data['msg'])
self.base_url = data['api']
self.token = json.dumps(data['data'])
else:
raise Exception("Failed to call GoodWe API (code {})".format(code))
except requests.exceptions.RequestException as exp:
logging.warning(exp)
time.sleep(i ** 3)
else:
raise Exception("Failed to call GoodWe API (too many retries)")
return {}
    def parseValue(self, value, unit):
        # Strip the unit suffix (note: rstrip removes any trailing characters
        # contained in `unit`, e.g. ' (W) ') and convert to float. Missing or
        # non-numeric values fall back to 0.
        try:
            return float(value.rstrip(unit))
        except (ValueError, AttributeError) as exp:
            logging.warning(exp)
            return 0
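# --- Illustrative usage sketch (added; not part of the original module) ---
# Assuming the enclosing SEMS API client class has been instantiated and
# authenticated elsewhere as `api` (the name is an assumption), a daily
# export could look like:
#
#   from datetime import date
#   readings = api.getDayReadings(date.today())
#   for entry in readings['entries']:
#       print(entry['dt'], entry['pgrid_w'], entry['eday_kwh'])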
|
the-stack_0_23377 |
import numpy as np
import os
from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.evaluations import calc_semantic_segmentation_confusion
import imageio
def run(args):
dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir='/home/wdliu/VOC/VOCdevkit/VOC2012/')
labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]
preds = []
for id in dataset.ids:
cls_labels = imageio.imread(os.path.join(args.sem_seg_out_dir, id + '.png')).astype(np.uint8)
cls_labels[cls_labels == 255] = 0
preds.append(cls_labels.copy())
confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]
gtj = confusion.sum(axis=1)
resj = confusion.sum(axis=0)
gtjresj = np.diag(confusion)
denominator = gtj + resj - gtjresj
fp = 1. - gtj / denominator
fn = 1. - resj / denominator
iou = gtjresj / denominator
print(fp[0], fn[0])
print(np.mean(fp[1:]), np.mean(fn[1:]))
print({'iou': iou, 'miou': np.nanmean(iou)})
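if __name__ == '__main__':
    # Minimal invocation sketch (added for illustration). The argument names
    # mirror the attributes run() reads from `args`; the default values are
    # placeholders, not settings from the original project.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--chainer_eval_set", default="train", type=str)
    parser.add_argument("--sem_seg_out_dir", default="result/sem_seg", type=str)
    run(parser.parse_args())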
|
the-stack_0_23379 | """My torch implementation of permutations and sinkhorn balancing ops.
A torch library of operations and sampling with permutations
and their approximation with doubly-stochastic matrices, through Sinkhorn balancing
"""
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.stats import kendalltau
import torch
#from torch.distributions import Bernoulli
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def my_sample_gumbel(shape, eps=1e-10):
"""Samples arbitrary-shaped standard gumbel variables.
Args:
shape: list of integers
eps: float, for numerical stability
Returns:
A sample of standard Gumbel random variables
"""
#Sample from Gumbel(0, 1)
U = torch.rand(shape, dtype=torch.float, device=device)
return -torch.log(eps - torch.log(U + eps))
def simple_sinkhorn(MatrixA, n_iter = 20):
#performing simple Sinkhorn iterations.
for i in range(n_iter):
MatrixA /= MatrixA.sum(dim=1, keepdim=True)
MatrixA /= MatrixA.sum(dim=2, keepdim=True)
return MatrixA
def my_sinkhorn(log_alpha, n_iters = 20):
# torch version
"""Performs incomplete Sinkhorn normalization to log_alpha.
By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
with positive entries can be turned into a doubly-stochastic matrix
(i.e. its rows and columns add up to one) via the successive row and column
normalization.
-To ensure positivity, the effective input to sinkhorn has to be
exp(log_alpha) (element wise).
-However, for stability, sinkhorn works in the log-space. It is only at
return time that entries are exponentiated.
[1] Sinkhorn, Richard and Knopp, Paul.
Concerning nonnegative matrices and doubly stochastic
matrices. Pacific Journal of Mathematics, 1967
Args:
log_alpha: a 2D tensor of shape [N, N]
n_iters: number of sinkhorn iterations (in practice, as little as 20
iterations are needed to achieve decent convergence for N~100)
Returns:
A 3D tensor of close-to-doubly-stochastic matrices (2D tensors are
converted to 3D tensors with batch_size equals to 1)
"""
n = log_alpha.size()[1]
log_alpha = log_alpha.view(-1, n, n)
for i in range(n_iters):
# torch.logsumexp(input, dim, keepdim, out=None)
#Returns the log of summed exponentials of each row of the input tensor in the given dimension dim
#log_alpha -= (torch.logsumexp(log_alpha, dim=2, keepdim=True)).view(-1, n, 1)
#log_alpha -= (torch.logsumexp(log_alpha, dim=1, keepdim=True)).view(-1, 1, n)
#avoid in-place
log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=2, keepdim=True)).view(-1, n, 1)
log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=1, keepdim=True)).view(-1, 1, n)
return torch.exp(log_alpha)
def my_gumbel_sinkhorn(log_alpha, temp=1.0, n_samples=1, noise_factor=1.0, n_iters=20, squeeze=True):
"""Random doubly-stochastic matrices via gumbel noise.
In the zero-temperature limit sinkhorn(log_alpha/temp) approaches
a permutation matrix. Therefore, for low temperatures this method can be
seen as an approximate sampling of permutation matrices, where the
distribution is parameterized by the matrix log_alpha
The deterministic case (noise_factor=0) is also interesting: it can be
shown that lim t->0 sinkhorn(log_alpha/t) = M, where M is a
permutation matrix, the solution of the
matching problem M=arg max_M sum_i,j log_alpha_i,j M_i,j.
Therefore, the deterministic limit case of gumbel_sinkhorn can be seen
as approximate solving of a matching problem, otherwise solved via the
Hungarian algorithm.
Warning: the convergence holds true in the limit case n_iters = infty.
Unfortunately, in practice n_iter is finite which can lead to numerical
instabilities, mostly if temp is very low. Those manifest as
pseudo-convergence or some row-columns to fractional entries (e.g.
a row having two entries with 0.5, instead of a single 1.0)
To minimize those effects, try increasing n_iter for decreased temp.
On the other hand, too-low temperature usually lead to high-variance in
gradients, so better not choose too low temperatures.
Args:
log_alpha: 2D tensor (a matrix of shape [N, N])
or 3D tensor (a batch of matrices of shape = [batch_size, N, N])
temp: temperature parameter, a float.
n_samples: number of samples
noise_factor: scaling factor for the gumbel samples. Mostly to explore
different degrees of randomness (and the absence of randomness, with
noise_factor=0)
n_iters: number of sinkhorn iterations. Should be chosen carefully, in
inverse correspondence with temp to avoid numerical instabilities.
squeeze: a boolean, if True and there is a single sample, the output will
remain being a 3D tensor.
Returns:
sink: a 4D tensor of [batch_size, n_samples, N, N] i.e.
batch_size *n_samples doubly-stochastic matrices. If n_samples = 1 and
squeeze = True then the output is 3D.
log_alpha_w_noise: a 4D tensor of [batch_size, n_samples, N, N] of
      noisy samples of log_alpha, divided by the temperature parameter. If
n_samples = 1 then the output is 3D.
"""
n = log_alpha.size()[1]
log_alpha = log_alpha.view(-1, n, n)
batch_size = log_alpha.size()[0]
#log_alpha_w_noise = log_alpha[:,None,:,:].expand(batch_size, n_samples, n, n)
log_alpha_w_noise = log_alpha.repeat(n_samples, 1, 1)
if noise_factor == 0:
noise = 0.0
else:
noise = my_sample_gumbel([n_samples*batch_size, n, n])*noise_factor
log_alpha_w_noise = log_alpha_w_noise + noise
log_alpha_w_noise = log_alpha_w_noise / temp
my_log_alpha_w_noise = log_alpha_w_noise.clone()
sink = my_sinkhorn(my_log_alpha_w_noise)
if n_samples > 1 or squeeze is False:
sink = sink.view(n_samples, batch_size, n, n)
sink = torch.transpose(sink, 1, 0)
log_alpha_w_noise = log_alpha_w_noise.view(n_samples, batch_size, n, n)
log_alpha_w_noise = torch.transpose(log_alpha_w_noise, 1, 0)
return sink, log_alpha_w_noise
def my_sample_uniform_and_order(n_lists, n_numbers, prob_inc):
"""Samples uniform random numbers, return sorted lists and the indices of their original values
Returns a 2-D tensor of n_lists lists of n_numbers sorted numbers in the [0,1]
interval, each of them having n_numbers elements.
Lists are increasing with probability prob_inc.
It does so by first sampling uniform random numbers, and then sorting them.
Therefore, sorted numbers follow the distribution of the order statistics of
a uniform distribution.
It also returns the random numbers and the lists of permutations p such
p(sorted) = random.
Notice that if one ones to build sorted numbers in different intervals, one
might just want to re-scaled this canonical form.
Args:
n_lists: An int,the number of lists to be sorted.
n_numbers: An int, the number of elements in the permutation.
prob_inc: A float, the probability that a list of numbers will be sorted in
increasing order.
Returns:
ordered: a 2-D float tensor with shape = [n_list, n_numbers] of sorted lists
of numbers.
random: a 2-D float tensor with shape = [n_list, n_numbers] of uniform random
numbers.
permutations: a 2-D int tensor with shape = [n_list, n_numbers], row i
satisfies ordered[i, permutations[i]) = random[i,:].
"""
# sample n_lists samples from Bernoulli with probability of prob_inc
my_bern = torch.distributions.Bernoulli(torch.tensor([prob_inc])).sample([n_lists])
sign = -1*((my_bern * 2) -torch.ones([n_lists,1]))
sign = sign.type(torch.float32)
random =(torch.empty(n_lists, n_numbers).uniform_(0, 1))
random =random.type(torch.float32)
# my change
#random_with_sign = random * sign
#Finds sorted values and indices of the k largest entries for the last dimension.
#sorted – controls whether to return the elements in sorted order
#ordered, permutations = torch.topk(random_with_sign, k = n_numbers, sorted = True)
# my change
ordered, permutations = torch.sort(random, descending=True)
#my change
#ordered = ordered * sign
return ordered, random, permutations
def my_sample_permutations(n_permutations, n_objects):
"""Samples a batch permutations from the uniform distribution.
Returns a sample of n_permutations permutations of n_objects indices.
Permutations are assumed to be represented as lists of integers
(see 'listperm2matperm' and 'matperm2listperm' for conversion to alternative
matricial representation). It does so by sampling from a continuous
distribution and then ranking the elements. By symmetry, the resulting
distribution over permutations must be uniform.
Args:
n_permutations: An int, the number of permutations to sample.
n_objects: An int, the number of elements in the permutation.
the embedding sources.
Returns:
A 2D integer tensor with shape [n_permutations, n_objects], where each
row is a permutation of range(n_objects)
"""
random_pre_perm = torch.empty(n_permutations, n_objects).uniform_(0, 1)
_, permutations = torch.topk(random_pre_perm, k = n_objects)
return permutations
def my_permute_batch_split(batch_split, permutations):
"""Scrambles a batch of objects according to permutations.
It takes a 3D tensor [batch_size, n_objects, object_size]
and permutes items in axis=1 according to the 2D integer tensor
permutations, (with shape [batch_size, n_objects]) a list of permutations
expressed as lists. For many dimensional-objects (e.g. images), objects have
to be flattened so they will respect the 3D format, i.e. tf.reshape(
batch_split, [batch_size, n_objects, -1])
Args:
batch_split: 3D tensor with shape = [batch_size, n_objects, object_size] of
splitted objects
permutations: a 2D integer tensor with shape = [batch_size, n_objects] of
permutations, so that permutations[n] is a permutation of range(n_objects)
Returns:
A 3D tensor perm_batch_split with the same shape as batch_split,
so that perm_batch_split[n, j,:] = batch_split[n, perm[n,j],:]
"""
batch_size= permutations.size()[0]
n_objects = permutations.size()[1]
permutations = permutations.view(batch_size, n_objects, -1)
perm_batch_split = torch.gather(batch_split, 1, permutations)
return perm_batch_split
def my_listperm2matperm(listperm):
"""Converts a batch of permutations to its matricial form.
Args:
listperm: 2D tensor of permutations of shape [batch_size, n_objects] so that
listperm[n] is a permutation of range(n_objects).
Returns:
a 3D tensor of permutations matperm of
shape = [batch_size, n_objects, n_objects] so that matperm[n, :, :] is a
permutation of the identity matrix, with matperm[n, i, listperm[n,i]] = 1
"""
n_objects = listperm.size()[1]
eye = torch.eye(n_objects, dtype=torch.int, device=listperm.device)[listperm]
# eye= torch.tensor(eye, dtype=torch.int32)
return eye
def my_matperm2listperm(matperm):
"""Converts a batch of permutations to its enumeration (list) form.
Args:
matperm: a 3D tensor of permutations of
shape = [batch_size, n_objects, n_objects] so that matperm[n, :, :] is a
permutation of the identity matrix. If the input is 2D, it is reshaped
to 3D with batch_size = 1.
dtype: output_type (int32, int64)
Returns:
A 2D tensor of permutations listperm, where listperm[n,i]
is the index of the only non-zero entry in matperm[n, i, :]
"""
batch_size = matperm.size()[0]
n_objects = matperm.size()[1]
matperm = matperm.view(-1, n_objects, n_objects)
#argmax is the index location of each maximum value found(argmax)
_, argmax = torch.max(matperm, dim=2, keepdim= True)
argmax = argmax.view(batch_size, n_objects)
return argmax
def my_invert_listperm(listperm):
"""Inverts a batch of permutations.
Args:
listperm: a 2D integer tensor of permutations listperm of
shape = [batch_size, n_objects] so that listperm[n] is a permutation of
range(n_objects)
Returns:
A 2D tensor of permutations listperm, where listperm[n,i]
is the index of the only non-zero entry in matperm[n, i, :]
"""
return my_matperm2listperm(torch.transpose(my_listperm2matperm(listperm), 1, 2))
def my_matching(matrix_batch):
"""Solves a matching problem for a batch of matrices.
This is a wrapper for the scipy.optimize.linear_sum_assignment function. It
solves the optimization problem max_P sum_i,j M_i,j P_i,j with P a
permutation matrix. Notice the negative sign; the reason, the original
function solves a minimization problem
Args:
matrix_batch: A 3D tensor (a batch of matrices) with
shape = [batch_size, N, N]. If 2D, the input is reshaped to 3D with
batch_size = 1.
Returns:
listperms, a 2D integer tensor of permutations with shape [batch_size, N]
so that listperms[n, :] is the permutation of range(N) that solves the
problem max_P sum_i,j M_i,j P_i,j with M = matrix_batch[n, :, :].
"""
def hungarian(x):
if x.ndim == 2:
x = np.reshape(x, [1, x.shape[0], x.shape[1]])
sol = np.zeros((x.shape[0], x.shape[1]), dtype=np.int32)
for i in range(x.shape[0]):
sol[i, :] = linear_sum_assignment(-x[i, :])[1].astype(np.int32)
return sol
listperms = hungarian(matrix_batch.cpu().detach().numpy())
# listperms = torch.from_numpy(listperms)
listperms = torch.tensor(listperms, dtype=torch.long)
return listperms
def my_kendall_tau(batch_perm1, batch_perm2):
"""Wraps scipy.stats kendalltau function.
Args:
batch_perm1: A 2D tensor (a batch of matrices) with
shape = [batch_size, N]
batch_perm2: same as batch_perm1
Returns:
A list of Kendall distances between each of the elements of the batch.
"""
def kendalltau_batch(x, y):
if x.ndim == 1:
x = np.reshape(x, [1, x.shape[0]])
if y.ndim == 1:
y = np.reshape(y, [1, y.shape[0]])
kendall = np.zeros((x.shape[0], 1), dtype=np.float32)
for i in range(x.shape[0]):
kendall[i, :] = kendalltau(x[i, :], y[i, :])[0]
return kendall
listkendall = kendalltau_batch(batch_perm1.cpu().numpy(), batch_perm2.cpu().numpy())
listkendall = torch.from_numpy(listkendall)
return listkendall
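if __name__ == "__main__":
    # Smoke-test / usage sketch (added for illustration; not part of the
    # original library). Sample hard permutations, relax them with
    # gumbel-sinkhorn, then recover hard permutations with the Hungarian
    # matching wrapper.
    n = 5
    listperm = my_sample_permutations(n_permutations=2, n_objects=n)
    matperm = my_listperm2matperm(listperm).float().to(device)
    log_alpha = torch.log(matperm + 1e-6)  # treat as unnormalised log-potentials
    soft_perm, _ = my_gumbel_sinkhorn(log_alpha, temp=0.1, noise_factor=0.0)
    recovered = my_matching(soft_perm)
    print("sampled:  ", listperm)
    print("recovered:", recovered)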
|
the-stack_0_23380 | import string
import math
# Positional inverted index, postings in form:
# <document id, term frequency in document, term positions in document>
class InvertedIndex:
num_documents = 0
inverted_index = {}
# Cached indices of positions last returned by prev/next calls for a term.
prev_cache = {}
next_cache = {}
# Read from a file and build the inverted index
def build_index(self, filename):
f = open(filename, 'r')
# Documents separated by newline
current_document = 1
# Term position within document
current_position = 1
# Read file line by line
for line in f:
# Check if line is only newline.
# If so, new document and reset term position.
if line == "\n":
current_document += 1
current_position = 1
else:
# Read line word by word ignoring whitespace
for word in line.split():
# Strip punctuation and convert to lowercase
word = word.translate(
str.maketrans("", "", string.punctuation))
word = word.lower()
# Case when stripping punctuation leaves the empty string
if word == "":
continue
# First occurrence of the word:
# add an entry in the dictionary
if word not in self.inverted_index:
# <docid, term frequency in doc, occurrences in doc>
self.inverted_index[word] = [
[current_document, 1, [current_position]]]
# Word seen before: add occurrence
else:
postings = self.inverted_index[word]
# Check if first occurrence of this document by
# checking last document posting.
# If so, new posting
if (postings[-1][0] != current_document):
postings += [
[current_document, 1, [current_position]]]
# Same document, increment freq, add occurrence
else:
postings[-1][1] += 1
postings[-1][2] += [current_position]
# Increment current_position.
current_position += 1
self.num_documents = current_document
        f.close()
# Returns the first occurrence of term t in the index
def first(self, t):
if t in self.inverted_index:
postings = self.inverted_index[t]
# (docid, position)
return (postings[0][0], postings[0][2][0])
else:
return "infinity"
# Returns the last occurrence of term t in the index
def last(self, t):
if t in self.inverted_index:
postings = self.inverted_index[t]
# (docid, position)
return (postings[-1][0], postings[-1][2][-1])
else:
return "infinity"
# Returns the previous occurrence of term t before position current
# Uses galloping search
def prev(self, t, current):
if t not in self.inverted_index:
return "-infinity"
# Check if current is before the first position in postings,
# thus no previous occurrence exists.
first_position = (
self.inverted_index[t][0][0], self.inverted_index[t][0][2][0])
# first_position >= current
if self.compare_positions(first_position, current) >= 0:
return "-infinity"
# Last position in postings is less than current, return.
last_position = (
self.inverted_index[t][-1][0], self.inverted_index[t][-1][2][-1])
# last_position < current
if self.compare_positions(last_position, current) < 0:
self.prev_cache[t] = self.num_positions(t) - 1
return last_position
# Initialize high after cached position from the last time prev was
# called if >= current, else start at last position in postings
high = self.num_positions(t) - 1
if (t in self.prev_cache and
self.prev_cache[t] < self.num_positions(t) - 1):
cache_position = self.index_to_position(t, self.prev_cache[t] + 1)
# cache_position >= current
if self.compare_positions(cache_position, current) >= 0:
high = self.prev_cache[t] + 1
jump = 1
low = high - jump
# Begin galloping search, increase size of jumps until low
# passes current or end of positions.
if low >= 0:
low_position = self.index_to_position(t, low)
while (low >= 0 and
self.compare_positions(low_position, current) >= 0):
high = low
jump = 2*jump
low = high - jump
if low >= 0:
low_position = self.index_to_position(t, low)
# Jumped past 0, cap at first position
if low < 0:
low = 0
# Binary search interval that current is contained in.
self.prev_cache[t] = self.binary_search(t, low, high, current, False)
return self.index_to_position(t, self.prev_cache[t])
# Returns the next occurrence of term t after position current
# Uses galloping search
def next(self, t, current):
if t not in self.inverted_index:
return "infinity"
# Check if current is past all positions in postings,
# thus no next occurrence exists.
last_position = (
self.inverted_index[t][-1][0], self.inverted_index[t][-1][2][-1])
# last_position <= current
if self.compare_positions(last_position, current) <= 0:
return "infinity"
# First position in postings is greater than current, return.
first_position = (
self.inverted_index[t][0][0], self.inverted_index[t][0][2][0])
# first_position > current
if self.compare_positions(first_position, current) > 0:
self.next_cache[t] = 0
return first_position
# Initialize low before cached position from the last time next was
# called if <= current, else start at first position in postings
low = 0
if t in self.next_cache and self.next_cache[t] > 0:
cache_position = self.index_to_position(t, self.next_cache[t] - 1)
# cache_position <= current
if self.compare_positions(cache_position, current) <= 0:
low = self.next_cache[t] - 1
jump = 1
high = low + jump
# Begin galloping search, increase size of jumps until high
# passes current or end of positions.
high_position = self.index_to_position(t, high)
while (high < self.num_positions(t) and
self.compare_positions(high_position, current) <= 0):
low = high
jump = 2*jump
high = low + jump
high_position = self.index_to_position(t, high)
# Jumped past last position, cap high at last position
if high >= self.num_positions(t):
high = self.num_positions(t) - 1
# Binary search interval that current is contained in.
self.next_cache[t] = self.binary_search(t, low, high, current, True)
return self.index_to_position(t, self.next_cache[t])
# Binary search through postings of term t in the index.
# Returns the next biggest or smallest posting after current
# depending on is_next
def binary_search(self, t, low, high, current, is_next):
# Loop until current is either low or high, or low and high exactly
# surround current.
while high - low > 1:
mid = low + math.floor((high - low)/2)
mid_position = self.index_to_position(t, mid)
# If looking for position bigger than current,
# keep value at high larger than current.
if is_next:
# mid_position <= current
if self.compare_positions(mid_position, current) <= 0:
low = mid
else:
high = mid
# Looking for position smaller than current,
# keep value at low smaller than current.
else:
# mid_position < current
if self.compare_positions(mid_position, current) < 0:
low = mid
else:
high = mid
if is_next:
return high
else:
return low
# Helper function that compares two term positions of form (doc, position)
# pos1 < pos2 == -1
# pos1 == pos2 == 0
# pos1 > pos2 == 1
def compare_positions(self, pos1, pos2):
if pos1 == "infinity":
return 1
if pos1 == "-infinity":
return -1
if pos2 == "infinity":
return -1
if pos2 == "-infinity":
return 1
# pos1's doc is less than pos2's
if pos1[0] < pos2[0]:
return -1
# same documents, check document positions
elif pos1[0] == pos2[0]:
if pos1[1] < pos2[1]:
return -1
elif pos1[1] == pos2[1]:
return 0
else:
return 1
else:
return 1
# Helper function that returns size of a term's total positions in the
# inverted index
def num_positions(self, t):
result = 0
postings = self.inverted_index[t]
for posting in postings:
result += len(posting[2])
return result
# Helper function that takes a term and an index and finds the
# corresponding position in the term's postings, as if all the term's
# document positions were in one list.
def index_to_position(self, t, index):
postings = self.inverted_index[t]
for posting in postings:
positions = posting[2]
# index is contained in this posting's positions
if len(positions) > index:
# (docid, doc_position)
return (posting[0], positions[index])
# index refers to position in a further posting
else:
index -= len(positions)
# Index greater than total positions
return "infinity"
|
the-stack_0_23381 | # -*- coding: utf-8 -*-
""" Manifest Test Cases
"""
import json
import logging
import tempfile
from contextlib import contextmanager
import django
from django.conf import settings
from django.template import Context, Template
from django.test import RequestFactory, TestCase
from django.test.utils import (
_TestState,
setup_databases,
setup_test_environment,
)
from django.urls import reverse
from django.utils.translation import activate, deactivate
from PIL import Image
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.test import APIClient
from manifest import defaults
TEMPFILE_MEDIA_ROOT = tempfile.mkdtemp()
# If test are not run by Django test runner (eg. IDE),
# setup Django and test environment.
if not hasattr(_TestState, "saved_data"):
django.setup()
setup_test_environment()
setup_databases(1, False)
class ManifestTestCase(TestCase):
"""Base test case that setup fixtures and user locale.
"""
fixtures = ["test"]
logger = logging.getLogger(__name__)
def _pre_setup(self):
activate(settings.LANGUAGE_CODE)
super()._pre_setup()
def setUp(self):
self.factory = RequestFactory()
super().setUp()
def _post_teardown(self):
super()._post_teardown()
deactivate()
# pylint: disable=no-self-use
def render(self, string, context=None):
"""Renders any given string to template.
:param string: Template code to render.
:type string: string
:param context: Template context to be rendered, defaults to None
:type context: dict, optional
:return: Rendered template.
:rtype: string
"""
context = context or {}
context = Context(context)
return Template(string).render(context)
@contextmanager
def defaults(self, **kwargs):
original = {}
for key, value in kwargs.items():
original[key] = getattr(defaults, key, None)
setattr(defaults, key, value)
yield
for key, value in original.items():
setattr(defaults, key, value)
class ManifestAPIClient(APIClient):
"""Test client for REST API tests.
"""
def login(self, **credentials):
super(ManifestAPIClient, self).login(**credentials)
try:
response = self.post(reverse("auth_login_api"), data=credentials)
self.credentials(
HTTP_AUTHORIZATION="JWT " + response.json["token"]
)
except KeyError:
            raise AuthenticationFailed("No Token")
# pylint: disable=bad-continuation,too-many-arguments
def generic(
self,
method,
path,
data="",
content_type="application/json",
secure=False,
**extra,
):
response = super().generic(
method, path, data, content_type, secure, **extra
)
try:
is_json = bool(
# pylint: disable=W0212
[x for x in response._headers["content-type"] if "json" in x]
)
except KeyError:
is_json = False
response.json = {}
if is_json and response.content:
response.json = json.loads(response.content)
return response
class ManifestAPITestCase(ManifestTestCase):
"""Test case for REST API views.
"""
client_class = ManifestAPIClient
class ManifestUploadTestCase(ManifestTestCase):
"""Test case for image uploading views.
"""
# pylint: disable=no-self-use
def create_image(self, suffix=".png", image_format="PNG"):
with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as file:
image = Image.new("RGB", (200, 200), "white")
image.save(file, image_format)
return file
def get_file(self, file):
return open(file.name, mode="rb")
# pylint: disable=no-self-use
def get_raw_file(self, file):
from django.core.files.uploadedfile import SimpleUploadedFile
return SimpleUploadedFile(
name=file.name,
content=open(file.name, "rb").read(),
content_type="image/png",
)
def setUp(self):
self.image_file = self.get_file(self.create_image())
self.raw_image_file = self.get_raw_file(self.image_file)
super().setUp()
def tearDown(self):
self.image_file.close()
self.raw_image_file.close()
@contextmanager
def upload_limit(self):
old = defaults.MANIFEST_PICTURE_MAX_FILE
defaults.MANIFEST_PICTURE_MAX_FILE = 1
yield
defaults.MANIFEST_PICTURE_MAX_FILE = old
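# Illustrative example (added; not part of the original module): a concrete
# test case built on the helpers above. The URL name "auth_login" is an
# assumption for demonstration only.
#
# class ExampleLoginTestCase(ManifestTestCase):
#     def test_login_page_renders(self):
#         response = self.client.get(reverse("auth_login"))
#         self.assertEqual(response.status_code, 200)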
|
the-stack_0_23383 | # -*- coding: utf-8 -*-
import asyncio
import json
import copy
from collections import namedtuple
from distutils.version import StrictVersion
from functools import wraps
from typing import Callable, Dict, Tuple, Union, Optional, List # noqa
from unittest.mock import Mock, patch
import inspect
from aiohttp import (
ClientConnectionError,
ClientResponse,
ClientSession,
hdrs,
http
)
from aiohttp.helpers import TimerNoop
from multidict import CIMultiDict, CIMultiDictProxy
from .compat import (
AIOHTTP_VERSION,
URL,
Pattern,
stream_reader_factory,
merge_params,
normalize_url,
)
class CallbackResult:
def __init__(self, method: str = hdrs.METH_GET,
status: int = 200,
body: str = '',
content_type: str = 'application/json',
payload: Dict = None,
headers: Dict = None,
response_class: 'ClientResponse' = None,
reason: Optional[str] = None):
self.method = method
self.status = status
self.body = body
self.content_type = content_type
self.payload = payload
self.headers = headers
self.response_class = response_class
self.reason = reason
class RequestMatch(object):
url_or_pattern = None # type: Union[URL, Pattern]
def __init__(self, url: Union[URL, str, Pattern],
method: str = hdrs.METH_GET,
status: int = 200,
body: str = '',
payload: Dict = None,
exception: 'Exception' = None,
headers: Dict = None,
content_type: str = 'application/json',
response_class: 'ClientResponse' = None,
timeout: bool = False,
repeat: bool = False,
reason: Optional[str] = None,
callback: Optional[Callable] = None):
if isinstance(url, Pattern):
self.url_or_pattern = url
self.match_func = self.match_regexp
else:
self.url_or_pattern = normalize_url(url)
self.match_func = self.match_str
self.method = method.lower()
self.status = status
self.body = body
self.payload = payload
self.exception = exception
if timeout:
self.exception = asyncio.TimeoutError('Connection timeout test')
self.headers = headers
self.content_type = content_type
self.response_class = response_class
self.repeat = repeat
self.reason = reason
if self.reason is None:
try:
self.reason = http.RESPONSES[self.status][0]
except (IndexError, KeyError):
self.reason = ''
self.callback = callback
def match_str(self, url: URL) -> bool:
return self.url_or_pattern == url
def match_regexp(self, url: URL) -> bool:
return bool(self.url_or_pattern.match(str(url)))
def match(self, method: str, url: URL) -> bool:
if self.method != method.lower():
return False
return self.match_func(url)
def _build_raw_headers(self, headers: Dict) -> Tuple:
"""
Convert a dict of headers to a tuple of tuples
Mimics the format of ClientResponse.
"""
raw_headers = []
for k, v in headers.items():
raw_headers.append((k.encode('utf8'), v.encode('utf8')))
return tuple(raw_headers)
def _build_response(self, url: 'Union[URL, str]',
method: str = hdrs.METH_GET,
request_headers: Dict = None,
status: int = 200,
body: str = '',
content_type: str = 'application/json',
payload: Dict = None,
headers: Dict = None,
response_class: 'ClientResponse' = None,
reason: Optional[str] = None) -> ClientResponse:
if response_class is None:
response_class = ClientResponse
if payload is not None:
body = json.dumps(payload)
if not isinstance(body, bytes):
body = str.encode(body)
if request_headers is None:
request_headers = {}
kwargs = {}
if AIOHTTP_VERSION >= StrictVersion('3.1.0'):
loop = Mock()
loop.get_debug = Mock()
loop.get_debug.return_value = True
kwargs['request_info'] = Mock(
url=url,
method=method,
headers=CIMultiDictProxy(CIMultiDict(**request_headers)),
)
kwargs['writer'] = Mock()
kwargs['continue100'] = None
kwargs['timer'] = TimerNoop()
if AIOHTTP_VERSION < StrictVersion('3.3.0'):
kwargs['auto_decompress'] = True
kwargs['traces'] = []
kwargs['loop'] = loop
kwargs['session'] = None
else:
loop = None
# We need to initialize headers manually
_headers = CIMultiDict({hdrs.CONTENT_TYPE: content_type})
if headers:
_headers.update(headers)
raw_headers = self._build_raw_headers(_headers)
resp = response_class(method, url, **kwargs)
for hdr in _headers.getall(hdrs.SET_COOKIE, ()):
resp.cookies.load(hdr)
if AIOHTTP_VERSION >= StrictVersion('3.3.0'):
# Reified attributes
resp._headers = _headers
resp._raw_headers = raw_headers
else:
resp.headers = _headers
resp.raw_headers = raw_headers
resp.status = status
resp.reason = reason
resp.content = stream_reader_factory(loop)
resp.content.feed_data(body)
resp.content.feed_eof()
return resp
async def build_response(
self, url: URL, **kwargs
) -> 'Union[ClientResponse, Exception]':
if self.exception is not None:
return self.exception
if callable(self.callback):
if asyncio.iscoroutinefunction(self.callback):
result = await self.callback(url, **kwargs)
else:
result = self.callback(url, **kwargs)
else:
result = None
result = self if result is None else result
resp = self._build_response(
url=url,
method=result.method,
request_headers=kwargs.get("headers"),
status=result.status,
body=result.body,
content_type=result.content_type,
payload=result.payload,
headers=result.headers,
response_class=result.response_class,
reason=result.reason)
return resp
RequestCall = namedtuple('RequestCall', ['args', 'kwargs'])
class aioresponses(object):
"""Mock aiohttp requests made by ClientSession."""
_matches = None # type: List[RequestMatch]
_responses = None # type: List[ClientResponse]
requests = None # type: Dict
def __init__(self, **kwargs):
self._param = kwargs.pop('param', None)
self._passthrough = kwargs.pop('passthrough', [])
self.patcher = patch('aiohttp.client.ClientSession._request',
side_effect=self._request_mock,
autospec=True)
self.requests = {}
def __enter__(self) -> 'aioresponses':
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def __call__(self, f):
def _pack_arguments(ctx, *args, **kwargs) -> Tuple[Tuple, Dict]:
if self._param:
kwargs[self._param] = ctx
else:
args += (ctx,)
return args, kwargs
if asyncio.iscoroutinefunction(f):
@wraps(f)
async def wrapped(*args, **kwargs):
with self as ctx:
args, kwargs = _pack_arguments(ctx, *args, **kwargs)
return await f(*args, **kwargs)
else:
@wraps(f)
def wrapped(*args, **kwargs):
with self as ctx:
args, kwargs = _pack_arguments(ctx, *args, **kwargs)
return f(*args, **kwargs)
return wrapped
def clear(self):
self._responses.clear()
self._matches.clear()
def start(self):
self._responses = []
self._matches = []
self.patcher.start()
self.patcher.return_value = self._request_mock
def stop(self) -> None:
for response in self._responses:
response.close()
self.patcher.stop()
self.clear()
def head(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_HEAD, **kwargs)
def get(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_GET, **kwargs)
def post(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_POST, **kwargs)
def put(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_PUT, **kwargs)
def patch(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_PATCH, **kwargs)
def delete(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_DELETE, **kwargs)
def options(self, url: 'Union[URL, str, Pattern]', **kwargs):
self.add(url, method=hdrs.METH_OPTIONS, **kwargs)
def add(self, url: 'Union[URL, str, Pattern]', method: str = hdrs.METH_GET,
status: int = 200,
body: str = '',
exception: 'Exception' = None,
content_type: str = 'application/json',
payload: Dict = None,
headers: Dict = None,
response_class: 'ClientResponse' = None,
repeat: bool = False,
timeout: bool = False,
reason: Optional[str] = None,
callback: Optional[Callable] = None) -> None:
self._matches.append(RequestMatch(
url,
method=method,
status=status,
content_type=content_type,
body=body,
exception=exception,
payload=payload,
headers=headers,
response_class=response_class,
repeat=repeat,
timeout=timeout,
reason=reason,
callback=callback,
))
@staticmethod
def is_exception(resp_or_exc: Union[ClientResponse, Exception]) -> bool:
if inspect.isclass(resp_or_exc):
parent_classes = set(inspect.getmro(resp_or_exc))
if {Exception, BaseException} & parent_classes:
return True
else:
if isinstance(resp_or_exc, (Exception, BaseException)):
return True
return False
async def match(
self, method: str, url: URL,
allow_redirects: bool = True, **kwargs: Dict
) -> Optional['ClientResponse']:
history = []
while True:
for i, matcher in enumerate(self._matches):
if matcher.match(method, url):
response_or_exc = await matcher.build_response(
url, allow_redirects=allow_redirects, **kwargs
)
break
else:
return None
if matcher.repeat is False:
del self._matches[i]
if self.is_exception(response_or_exc):
raise response_or_exc
if response_or_exc.status in (
301, 302, 303, 307, 308) and allow_redirects:
if hdrs.LOCATION not in response_or_exc.headers:
break
history.append(response_or_exc)
url = URL(response_or_exc.headers[hdrs.LOCATION])
continue
else:
break
response_or_exc._history = tuple(history)
return response_or_exc
async def _request_mock(self, orig_self: ClientSession,
method: str, url: 'Union[URL, str]',
*args: Tuple,
**kwargs: Dict) -> 'ClientResponse':
"""Return mocked response object or raise connection error."""
if orig_self.closed:
raise RuntimeError('Session is closed')
url_origin = url
url = normalize_url(merge_params(url, kwargs.get('params')))
url_str = str(url)
for prefix in self._passthrough:
if url_str.startswith(prefix):
return (await self.patcher.temp_original(
orig_self, method, url_origin, *args, **kwargs
))
key = (method, url)
self.requests.setdefault(key, [])
try:
kwargs_copy = copy.deepcopy(kwargs)
except TypeError:
# Handle the fact that some values cannot be deep copied
kwargs_copy = kwargs
self.requests[key].append(RequestCall(args, kwargs_copy))
response = await self.match(method, url, **kwargs)
if response is None:
raise ClientConnectionError(
'Connection refused: {} {}'.format(method, url)
)
self._responses.append(response)
# Automatically call response.raise_for_status() on a request if the
# request was initialized with raise_for_status=True. Also call
# response.raise_for_status() if the client session was initialized
# with raise_for_status=True, unless the request was called with
# raise_for_status=False.
raise_for_status = kwargs.get('raise_for_status')
if raise_for_status is None:
raise_for_status = getattr(
orig_self, '_raise_for_status', False
)
if raise_for_status:
response.raise_for_status()
return response
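def _example_usage():
    # Illustrative usage sketch (added; not part of the original module):
    # register a mocked GET endpoint and exercise it through a real
    # ClientSession. The URL and payload are arbitrary placeholders.
    loop = asyncio.new_event_loop()

    async def fetch():
        async with ClientSession() as session:
            async with session.get('http://example.com/api') as resp:
                return await resp.json()

    with aioresponses() as mocked:
        mocked.get('http://example.com/api', payload={'foo': 'bar'})
        assert loop.run_until_complete(fetch()) == {'foo': 'bar'}
    loop.close()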
|
the-stack_0_23386 | #!/usr/bin/env python
import sys
import time
import pygame
import serial
pygame.init() # initiaise pygame
controls = pygame.joystick.Joystick(0) # call the joystic controls
clock = pygame.time.Clock() # intialise pygame refresh and call it clock
controls.init() # initialise the controls
#arduino = serial.Serial('/dev/ttyUSB0', 9600,timeout = 1) # connect to the arduino's serial port
#time.sleep(2)
arduino = serial.Serial('COM9', 9600,timeout = 1) # connect to the arduino's serial port
time.sleep(2)
EXIT = False
old_min = -1
old_max = 1
new_min = 100
new_max = 355
def valueMap(old_value):
new_value = str(int(round(( ( old_value - old_min ) / (old_max - old_min) ) * (new_max - new_min) + new_min)))
return(new_value)
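# Example (illustrative): valueMap(-1.0) -> '100', valueMap(0.0) -> '228',
# valueMap(1.0) -> '355'; axis readings in [-1, 1] are mapped to the integer
# range [100, 355] sent to the Arduino.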
while not EXIT:
for event in pygame.event.get():
if event.type == pygame.QUIT:
EXIT = True
controllerName = str(controls.get_name())
axesNumber = controls.get_numaxes()
hatNumber = controls.get_numhats()
buttonNumber = controls.get_numbuttons()
a = valueMap(controls.get_axis(0))
b = valueMap(controls.get_axis(1))
c = valueMap(controls.get_axis(2))
d = valueMap(controls.get_axis(3))
e = '000'
f = '0000'
for x in range(buttonNumber):
if controls.get_button(x) == 1:
e = '0' + str(x+1)
if x < 9:
e = '00' + str(x+1)
for positionHat in range(hatNumber):
hat = controls.get_hat(positionHat)
if hat[0] == -1:
f = '1000'
elif hat[0] == 1:
f = '0100'
if hat[1] == 1:
f = '0010'
elif hat[1] == -1:
f = '0001'
control = ['<' + a,b,c,d,e,f + '>'] # save strings in a list
cstring = ",".join(control) # convert list to a single string with commas seperating values
print(cstring)
    arduino.write(cstring.encode())  # pyserial expects bytes; write data to arduino with a 0.1ms delay
time.sleep(0.0001)
if controls.get_button(12) == 1:
pygame.quit() # This is used to quit pygame and use any internal program within the python
quit()
clock.tick(1000)
pygame.quit()
quit()
|
the-stack_0_23387 | from __future__ import division
from builtins import object
from past.utils import old_div
from fuzzywuzzy import fuzz
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from translation.translationConstants import TranslationConstants
from utils.utilities import Utilities
class SimilarityMetrics(object):
@staticmethod
def textMatch(ehr, fhir, highestCompositeResult=TranslationConstants.COMPOSITE_STRING_SIMILARITY_HIGHEST_COMPOSITE_RESULT, textSimilarityThreshold=TranslationConstants.OVERALL_SIMILARITY_THRESHOLD):
if (SimilarityMetrics.compositeStringSimilarity(ehr, fhir, SimilarityMetrics.textSimilarity, [], highestCompositeResult) * TranslationConstants.TEXT_SIMILARITY_WEIGHTING >= textSimilarityThreshold):
return True;
else:
return False;
@staticmethod
def morphologicalMatch(ehr, fhir, highestCompositeResult=TranslationConstants.COMPOSITE_STRING_SIMILARITY_HIGHEST_COMPOSITE_RESULT, morphologicalSimilarityThreshold=TranslationConstants.OVERALL_SIMILARITY_THRESHOLD):
if (SimilarityMetrics.compositeStringSimilarity(ehr, fhir, SimilarityMetrics.morphologicalSimilarity, [], highestCompositeResult) * TranslationConstants.MORPHOLOGICAL_SIMILARITY_WEIGHTING >= morphologicalSimilarityThreshold):
return True;
else:
return False;
# Similarity Metric A
@staticmethod
def textSimilarity(ehrAttribute, fhirAttribute, stem=False):
if stem:
stemmer = PorterStemmer()
ehrAttribute = stemmer.stem(ehrAttribute);
fhirAttribute = stemmer.stem(fhirAttribute);
textSimilarity = fuzz.ratio(ehrAttribute.lower(), fhirAttribute.lower()) / 100.0;
return textSimilarity;
wordsToTypes = {};
synsetToDefinitionTerms = {};
# Similarity Metric B
@staticmethod
def morphologicalSimilarity(ehrAttribute, fhirAttribute, lemmaSimilarityThreshold=TranslationConstants.MORPHOLOGICAL_SIMILARITY_THRESHOLD):
if SimilarityMetrics.textMatch(ehrAttribute, fhirAttribute): return 1;
highestSimilarity = 0;
for lemma in Utilities.lemmas(ehrAttribute):
if SimilarityMetrics.textSimilarity(lemma, fhirAttribute, True) > highestSimilarity and SimilarityMetrics.textMatch(lemma, fhirAttribute, True, lemmaSimilarityThreshold):
highestSimilarity = SimilarityMetrics.textSimilarity(lemma, fhirAttribute, True);
return highestSimilarity;
# Similarity Metric C
@staticmethod
def semanticSimilarity(ehrAttribute, fhirAttribute, useDefinition=False, alsoUseMorphologicalSimilarity=False, morphologicalSimilarityThreshold=TranslationConstants.MORPHOLOGICAL_SIMILARITY_THRESHOLD, compositeSynonyms=False, highestResult=True ):
# If these attributes would be associated via a text match instead, then don't also reevaluate their similarity via the text similarity below.
if SimilarityMetrics.textMatch(ehrAttribute, fhirAttribute, False): return 0;
highestSimilarity = 0;
# wordnet requires word separation by underscore, whereas EHR XML responses (for TPP at least) use camelCase (this won't be an issue if used with composite string similarity, where only one word is used at a time).
for set in wordnet.synsets(Utilities.capitalToSeparation(ehrAttribute)):
synonyms = set.lemma_names();
if useDefinition:
setType = set.pos();
associatedSynonyms = [];
if ( set not in SimilarityMetrics.synsetToDefinitionTerms ):
# We also include words from the definition of this word, that are of the same grammatical type (e.g. noun or verb), as potential synonyms.
for word in set.definition().split(" "):
if ( len(word) <= 3 or word in associatedSynonyms or "." in word ): continue;
if ( word not in SimilarityMetrics.wordsToTypes ):
wordSynset = wordnet.synsets(word);
if not len(wordSynset): continue;
# Find most popular interpretation of this word, so can find right grammatical form.
chosenSynset = wordSynset[0];
highestLemmaPopularity = 0;
                            # Use a distinct loop variable so the enclosing
                            # synset variable `set` is not clobbered here.
                            for candidateSet in wordSynset:
                                for lemma in candidateSet.lemmas():
                                    if lemma.count() > highestLemmaPopularity:
                                        highestLemmaPopularity = lemma.count();
                                        chosenSynset = candidateSet;
SimilarityMetrics.wordsToTypes[word] = chosenSynset.pos();
if ( SimilarityMetrics.wordsToTypes[word] == setType ):
associatedSynonyms.append(word);
SimilarityMetrics.synsetToDefinitionTerms[set] = associatedSynonyms;
synonyms = synonyms + SimilarityMetrics.synsetToDefinitionTerms[set];
for synonym in synonyms:
# Do we want the highest value across all components of the synonym, or just the synonym directy.
if ( compositeSynonyms ):
textSimilarity = SimilarityMetrics.compositeStringSimilarity(Utilities.separationToCapital(synonym), fhirAttribute, SimilarityMetrics.textSimilarity, [], highestResult);
else:
textSimilarity = SimilarityMetrics.textSimilarity(Utilities.separationToCapital(synonym), fhirAttribute);
# Synonyms may also be grammatical variants as opposed to just text matches.
if ( alsoUseMorphologicalSimilarity ):
if ( compositeSynonyms ):
morphologicalSimilarity = SimilarityMetrics.compositeStringSimilarity(Utilities.separationToCapital(synonym), fhirAttribute, SimilarityMetrics.morphologicalSimilarity, [morphologicalSimilarityThreshold], highestResult);
else:
                        morphologicalSimilarity = SimilarityMetrics.morphologicalSimilarity(synonym, fhirAttribute);
else:
morphologicalSimilarity = 0;
# Get similarity between synonym for ehrAttribute and fhirAttribute (not synonyms that are the ehr attribute itself). If this is over a given threshold, AND it is greater than previously marked highest values, update highest similarity.
if not SimilarityMetrics.textSimilarity(synonym, ehrAttribute) == 1.0 and max(textSimilarity, morphologicalSimilarity) > highestSimilarity:
highestSimilarity = max(textSimilarity, morphologicalSimilarity);
return highestSimilarity;
# Similarity Metric D - Sentence progression? e.g. "Done at" and "Location"
######
# With highest result False, there needs to be a stricter connection between the class or fields. Probably best for child fields to have stricter match rules.
@staticmethod
def compositeStringSimilarity(ehrClassField, fhirClassField, comparisonMethod, comparisonMethodArgs=[], highestResult=True, removeStopwords=True):
if ( comparisonMethod(ehrClassField, fhirClassField, *comparisonMethodArgs) == 1 ): return 1;
# If ehrClass string is composite, compare each word with the FHIR target using all of the metrics, and then use chosen combination method to produce a value, e.g. for each word, add these values, and then divide by number of words to get an average match across all words or return highest.
highestSimilarity = 0;
highestSimilarityWord = "";
totalSimilarity = 0;
ehrWords = Utilities.listFromCapitals(ehrClassField);
fhirWords = Utilities.listFromCapitals(fhirClassField);
if (removeStopwords): ehrWords = [word for word in ehrWords if word.lower() not in stopwords.words('english')];
for ehrWord in ehrWords:
highestSimilarityForEHRWord = 0;
for fhirWord in fhirWords:
similarity = comparisonMethod(ehrWord, fhirWord, *comparisonMethodArgs);
if ( similarity > highestSimilarity ):
highestSimilarity = similarity;
highestSimilarityWord = ehrWord;
if ( similarity > highestSimilarityForEHRWord ): highestSimilarityForEHRWord = similarity;
totalSimilarity += highestSimilarityForEHRWord;
if ( highestResult and len(highestSimilarityWord) > TranslationConstants.LENGTH_TO_IGNORE_IN_COMPOSITE_HIGHEST ):
return highestSimilarity;
else:
return old_div(totalSimilarity, max(float(len(ehrWords)), float(len(fhirWords))));
######
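# Usage sketch (added for illustration; not part of the original module):
#
#   score = SimilarityMetrics.compositeStringSimilarity(
#       "dateOfBirth", "birthDate", SimilarityMetrics.textSimilarity);
#   matched = SimilarityMetrics.morphologicalMatch("dateOfBirth", "birthDate");
#
# The attribute names above are illustrative EHR/FHIR field names only.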
|
the-stack_0_23388 | #!/usr/bin/env python3
# Note: this file is modeled after td3.py
import logging
from typing import Dict, Optional
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
param_hash,
)
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase
from reagent.models.base import ModelBase
from reagent.net_builder.discrete_actor.fully_connected import (
FullyConnected as DiscreteFullyConnected,
)
from reagent.net_builder.discrete_dqn.dueling import Dueling
from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected
from reagent.net_builder.unions import (
DiscreteActorNetBuilder__Union,
DiscreteDQNNetBuilder__Union,
)
from reagent.reporting.discrete_crr_reporter import DiscreteCRRReporter
from reagent.training import DiscreteCRRTrainer, CRRTrainerParameters
from reagent.training import ReAgentLightningModule
from reagent.workflow.types import RewardOptions
logger = logging.getLogger(__name__)
class ActorPolicyWrapper(Policy):
"""Actor's forward function is our act"""
def __init__(self, actor_network):
self.actor_network = actor_network
# pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
# its type `no_grad` is not callable.
@torch.no_grad()
def act(
self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None
) -> rlt.ActorOutput:
self.actor_network.eval()
output = self.actor_network(obs)
self.actor_network.train()
return output.detach().cpu()
@dataclass
class DiscreteCRR(DiscreteDQNBase):
__hash__ = param_hash
trainer_param: CRRTrainerParameters = field(default_factory=CRRTrainerParameters)
actor_net_builder: DiscreteActorNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteActorNetBuilder__Union(
FullyConnected=DiscreteFullyConnected()
)
)
critic_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())
)
cpe_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
default_factory=lambda: DiscreteDQNNetBuilder__Union(
FullyConnected=FullyConnected()
)
)
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
assert (
len(self.action_names) > 1
), f"DiscreteCRRModel needs at least 2 actions. Got {self.action_names}."
@property
def action_names(self):
return self.trainer_param.actions
@property
def rl_parameters(self):
return self.trainer_param.rl
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> DiscreteCRRTrainer:
actor_net_builder = self.actor_net_builder.value
actor_network = actor_net_builder.build_actor(
normalization_data_map[NormalizationKey.STATE], len(self.action_names)
)
actor_network_target = actor_network.get_target_network()
# The arguments to q_network1 and q_network2 below are modeled after those in discrete_dqn.py
critic_net_builder = self.critic_net_builder.value
q1_network = critic_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
q1_network_target = q1_network.get_target_network()
q2_network = q2_network_target = None
# pyre-fixme[16]: `CRRTrainerParameters` has no attribute
# `double_q_learning`.
if self.trainer_param.double_q_learning:
q2_network = critic_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
q2_network_target = q2_network.get_target_network()
reward_options = reward_options or RewardOptions()
metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values)
reward_network, q_network_cpe, q_network_cpe_target = None, None, None
if self.eval_parameters.calc_cpe_in_training:
# Metrics + reward
num_output_nodes = (len(metrics_to_score) + 1) * len(
# pyre-fixme[16]: `CRRTrainerParameters` has no attribute `actions`.
self.trainer_param.actions
)
cpe_net_builder = self.cpe_net_builder.value
reward_network = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe = cpe_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
num_output_nodes,
)
q_network_cpe_target = q_network_cpe.get_target_network()
trainer = DiscreteCRRTrainer(
actor_network=actor_network,
actor_network_target=actor_network_target,
q1_network=q1_network,
q1_network_target=q1_network_target,
reward_network=reward_network,
q2_network=q2_network,
q2_network_target=q2_network_target,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=metrics_to_score,
evaluation=self.eval_parameters,
# pyre-fixme[16]: `CRRTrainerParameters` has no attribute `asdict`.
**self.trainer_param.asdict(),
)
return trainer
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
"""Create online actor critic policy."""
assert isinstance(trainer_module, DiscreteCRRTrainer)
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_actor_module(trainer_module, normalization_data_map)
)
else:
return ActorPolicyWrapper(trainer_module.actor_network)
def get_reporter(self):
return DiscreteCRRReporter(
self.trainer_param.actions,
target_action_distribution=self.target_action_distribution,
)
# Note: when using test_gym.py as the entry point, the normalization data
# is set when the line normalization = build_normalizer(env) is executed.
# The code then calls build_state_normalizer() and build_action_normalizer()
# in utils.py
def serving_module_names(self):
module_names = ["default_model", "dqn", "actor_dqn"]
if len(self.action_names) == 2:
module_names.append("binary_difference_scorer")
return module_names
def build_serving_modules(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
):
"""
`actor_dqn` is the actor module wrapped in the DQN predictor wrapper.
This helps putting the actor in places where DQN predictor wrapper is expected.
If the policy is greedy, then this wrapper would work.
"""
assert isinstance(trainer_module, DiscreteCRRTrainer)
serving_modules = {
"default_model": self.build_actor_module(
trainer_module, normalization_data_map
),
"dqn": self._build_dqn_module(
trainer_module.q1_network, normalization_data_map
),
"actor_dqn": self._build_dqn_module(
ActorDQN(trainer_module.actor_network), normalization_data_map
),
}
if len(self.action_names) == 2:
serving_modules.update(
{
"binary_difference_scorer": self._build_binary_difference_scorer(
ActorDQN(trainer_module.actor_network), normalization_data_map
),
}
)
return serving_modules
def _build_dqn_module(
self,
network,
normalization_data_map: Dict[str, NormalizationData],
):
critic_net_builder = self.critic_net_builder.value
assert network is not None
return critic_net_builder.build_serving_module(
network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
def _build_binary_difference_scorer(
self,
network,
normalization_data_map: Dict[str, NormalizationData],
):
critic_net_builder = self.critic_net_builder.value
assert network is not None
return critic_net_builder.build_binary_difference_scorer(
network,
normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
# Also, even though the build_serving_module below is directed to
# discrete_actor_net_builder.py, which returns ActorPredictorWrapper,
# just like in the continuous_actor_net_builder.py, the outputs of the
# discrete actor will still be computed differently from those of the
# continuous actor because during serving, the act() function for the
# Agent class in gym/agents/agents.py returns
# self.action_extractor(actor_output), which is created in
# create_for_env_with_serving_policy, when
# env.get_serving_action_extractor() is called. During serving,
# action_extractor calls serving_action_extractor() in env_wrapper.py,
# which checks the type of action_space during serving time and treats
# spaces.Discrete differently from spaces.Box (continuous).
def build_actor_module(
self,
trainer_module: DiscreteCRRTrainer,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
net_builder = self.actor_net_builder.value
return net_builder.build_serving_module(
trainer_module.actor_network,
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
action_feature_ids=list(range(len(self.action_names))),
)
class ActorDQN(ModelBase):
def __init__(self, actor):
super().__init__()
self.actor = actor
def input_prototype(self):
return self.actor.input_prototype()
def forward(self, state):
return self.actor(state).action
|
the-stack_0_23389 | import tensorflow as tf
import numpy as np
import math
from PIL import Image
from tqdm import tqdm
import os
import h5py
# Read image
def imread(fname):
return Image.open(fname)
# Save image
def imsave(image, path, fname):
image = image * 255.
image = Image.fromarray(image.astype('uint8'), mode='YCbCr')
image = image.convert('RGB')
return image.save(os.path.join(path, fname))
# Save ground truth image, bicubic interpolated image and srcnn image
def save_result(path, gt, bicubic, srcnn, i):
imsave(gt, path, str(i)+ '_gt.png')
imsave(bicubic, path, str(i) + '_bicubic.png')
imsave(srcnn, path, str(i) + '_vdsr.png')
# Return True if the h5 sub-images file exists
def exist_train_data(datasetname):
return os.path.exists('{}.h5'.format(datasetname))
# Concatenate Y and CrCb channel
def concat_ycrcb(y, crcb):
return np.concatenate((y, crcb), axis=2)
def psnr(gt, sr, shave=0, max_val=1.):
    # Only crop the border when shave > 0; gt[0:-0] would be an empty slice.
    if shave:
        gt = gt[shave:-shave, shave:-shave]
        sr = sr[shave:-shave, shave:-shave]
    diff = (gt - sr).flatten()
    rmse = math.sqrt(np.mean(diff ** 2))
    return 20 * math.log10(max_val / rmse)
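# Illustrative check of psnr() (values are assumptions, not from the original code):
#
#   gt = np.ones((32, 32)) * 0.5
#   sr = gt + 0.01                 # uniform error of 0.01
#   psnr(gt, sr, shave=4)          # rmse = 0.01 -> 20 * log10(1 / 0.01) = 40 dB
#
# A nonzero shave discards border pixels that the upscaler cannot reconstruct reliably.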
def prepare_data(path, scale, is_valid=False):
dir_path = os.path.join(os.getcwd(), path)
path_gt = os.path.join(dir_path, 'gt')
path_lr = os.path.join(dir_path, 'bicubic_{:d}x'.format(scale))
    # fnames = ['baby_GT.bmp', 'bird_GT.bmp', ...]
fnames = os.listdir(path_gt)
inputs = []
labels = []
count = 0
for fname in tqdm(fnames, desc='[*] Generating dataset ... '):
count += 1
_input = imread(os.path.join(path_lr, fname))
_label = imread(os.path.join(path_gt, fname))
_input = np.array(_input) / 255.
_label = np.array(_label) / 255.
_label = _label[:_label.shape[0] - np.mod(_label.shape[0], scale), :_label.shape[1] - np.mod(_label.shape[1], scale)]
#_label = _label[:_label.shape[0]//scale, :_label.shape[1]//scale]
if is_valid:
h, w, _ = _input.shape
_input_y = _input[:, :, 0]
_label_y = _label[:, :, 0]
_input_y = _input_y.reshape([1, h, w, 1])
_label_y = _label_y.reshape([1, h, w, 1])
inputs.append(_input_y)
labels.append(_label_y)
else:
inputs.append(_input)
labels.append(_label)
if is_valid:
print('[*] Successfully prepared {:d} valid images !'.format(count))
else:
print('[*] Successfully prepared {:d} test images !'.format(count))
return inputs, labels
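# Example call (the directory name and scale are assumptions, not from this repo):
#
#   inputs, labels = prepare_data('Test/Set5', scale=2, is_valid=True)
#
# With is_valid=True each entry is a (1, h, w, 1) luminance (Y-channel) array ready
# to feed to the network; otherwise the full normalized YCbCr images are returned.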
|
the-stack_0_23390 | from conans.model import registered_generators
from conans.util.files import save, normalize
from os.path import join
from .text import TXTGenerator
from .gcc import GCCGenerator
from .cmake import CMakeGenerator
from .qmake import QmakeGenerator
from .qbs import QbsGenerator
from .visualstudio import VisualStudioGenerator
from .xcode import XCodeGenerator
from .ycm import YouCompleteMeGenerator
def _save_generator(name, klass):
if name not in registered_generators:
registered_generators.add(name, klass)
_save_generator("txt", TXTGenerator)
_save_generator("gcc", GCCGenerator)
_save_generator("cmake", CMakeGenerator)
_save_generator("qmake", QmakeGenerator)
_save_generator("qbs", QbsGenerator)
_save_generator("visual_studio", VisualStudioGenerator)
_save_generator("xcode", XCodeGenerator)
_save_generator("ycm", YouCompleteMeGenerator)
def write_generators(conanfile, path, output):
""" produces auxiliary files, required to build a project or a package.
"""
from conans.model.build_info import CppInfo
conanfile.cpp_info = CppInfo(path)
conanfile.cpp_info.dependencies = []
conanfile.package_info()
for generator_name in conanfile.generators:
if generator_name not in registered_generators:
output.warn("Invalid generator '%s'. Available types: %s" %
(generator_name, ", ".join(registered_generators.available)))
else:
generator_class = registered_generators[generator_name]
try:
generator = generator_class(conanfile)
except TypeError:
# To allow old-style generator packages to work (e.g. premake)
output.warn("Generator %s failed with new __init__(), trying old one")
generator = generator_class(conanfile.deps_cpp_info, conanfile.cpp_info)
output.info("Generated %s created %s" % (generator_name, generator.filename))
content = normalize(generator.content)
save(join(path, generator.filename), content)
|
the-stack_0_23391 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs both the Python and Java tests."""
import sys
import time
from pylib import apk_info
from pylib import test_options_parser
from pylib import run_java_tests
from pylib import run_python_tests
from pylib import run_tests_helper
from pylib.test_result import TestResults
def SummarizeResults(java_results, python_results, annotation):
"""Summarize the results from the various test types.
Args:
java_results: a TestResults object with java test case results.
python_results: a TestResults object with python test case results.
annotation: the annotation used for these results.
Returns:
A tuple (all_results, summary_string, num_failing)
"""
all_results = TestResults.FromTestResults([java_results, python_results])
summary_string = all_results.LogFull('Instrumentation', annotation)
num_failing = (len(all_results.failed) + len(all_results.crashed) +
len(all_results.unknown))
return all_results, summary_string, num_failing
def DispatchInstrumentationTests(options):
"""Dispatches the Java and Python instrumentation tests, sharding if possible.
Uses the logging module to print the combined final results and
summary of the Java and Python tests. If the java_only option is set, only
the Java tests run. If the python_only option is set, only the python tests
  run. If neither is set, run both Java and Python tests.
Args:
options: command-line options for running the Java and Python tests.
Returns:
An integer representing the number of failing tests.
"""
start_date = int(time.time() * 1000)
java_results = TestResults()
python_results = TestResults()
if options.run_java_tests:
java_results = run_java_tests.DispatchJavaTests(
options,
[apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)])
if options.run_python_tests:
python_results = run_python_tests.DispatchPythonTests(options)
all_results, summary_string, num_failing = SummarizeResults(
java_results, python_results, options.annotation)
return num_failing
def main(argv):
options = test_options_parser.ParseInstrumentationArgs(argv)
run_tests_helper.SetLogLevel(options.verbose_count)
return DispatchInstrumentationTests(options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
the-stack_0_23393 | from libp2p.io.abc import Reader
from libp2p.io.exceptions import IncompleteReadError
DEFAULT_RETRY_READ_COUNT = 100
async def read_exactly(
reader: Reader, n: int, retry_count: int = DEFAULT_RETRY_READ_COUNT
) -> bytes:
"""
NOTE: relying on exceptions to break out on erroneous conditions, like EOF
"""
data = await reader.read(n)
for _ in range(retry_count):
if len(data) < n:
remaining = n - len(data)
data += await reader.read(remaining)
else:
return data
raise IncompleteReadError({"requested_count": n, "received_count": len(data)})
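# Minimal usage sketch (the `stream` object below is an assumed Reader implementation):
#
#   header = await read_exactly(stream, 4)                      # fixed-size length prefix
#   body = await read_exactly(stream, int.from_bytes(header, "big"))
#
# IncompleteReadError is raised when the reader cannot supply the requested number
# of bytes within the retry budget.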
|
the-stack_0_23394 | r"""
Weight 1 modular forms
This module contains routines for computing weight 1 modular forms, using
George Schaeffer's "Hecke stability" algorithm (detailed in [Sch2015]_). These
functions are mostly for internal use; a more convenient interface is offered
by the usual ModularForms and CuspForms constructors.
AUTHORS:
- David Loeffler (2017-11): first version
"""
from sage.misc.cachefunc import cached_function
from sage.rings.all import PowerSeriesRing, ZZ
from sage.misc.misc import verbose
from sage.structure.sequence import Sequence
from sage.modular.arithgroup.all import Gamma0, GammaH
from sage.modular.arithgroup.arithgroup_generic import ArithmeticSubgroup
@cached_function
def modular_ratio_space(chi):
r"""
    Compute the space of 'modular ratios', i.e. meromorphic modular forms f of
    level N and character chi such that f * E is a holomorphic cusp form for
    every Eisenstein series E of weight 1 and character 1/chi.
Elements are returned as q-expansions up to precision R, where R is one
greater than the weight 3 Sturm bound.
EXAMPLES::
sage: chi = DirichletGroup(31,QQ).0
sage: sage.modular.modform.weight1.modular_ratio_space(chi)
[q - 8/3*q^3 + 13/9*q^4 + 43/27*q^5 - 620/81*q^6 + 1615/243*q^7 + 3481/729*q^8 + O(q^9),
q^2 - 8/3*q^3 + 13/9*q^4 + 70/27*q^5 - 620/81*q^6 + 1858/243*q^7 + 2752/729*q^8 + O(q^9)]
"""
from sage.modular.modform.constructor import EisensteinForms, CuspForms
if chi(-1) == 1: return []
N = chi.modulus()
chi = chi.minimize_base_ring()
K = chi.base_ring()
R = Gamma0(N).sturm_bound(3) + 1
verbose("Working precision is %s" % R, level=1)
verbose("Coeff field is %s" % K, level=1)
V = K**R
I = V
d = I.rank()
t = verbose("Calculating Eisenstein forms in weight 1...",level=1)
B0 = EisensteinForms(~chi, 1).q_echelon_basis(prec=R)
B = [b + B0[0] for b in B0]
verbose("Done (dimension %s)" % len(B),level=1,t=t)
t = verbose("Calculating in weight 2...", level=1)
C = CuspForms(Gamma0(N), 2).q_echelon_basis(prec=R)
verbose("Done (dimension %s)" % len(C), t=t, level=1)
t = verbose("Computing candidate space", level=1)
for b in B:
quots = (c/b for c in C)
W = V.span(V(x.padded_list(R)) for x in quots)
I = I.intersection(W)
if I.rank() < d:
verbose(" Cut down to dimension %s" % I.rank(), level=1)
d = I.rank()
if I.rank() == 0:
break
verbose("Done: intersection is %s-dimensional" % I.dimension(), t=t, level=1)
A = PowerSeriesRing(K, 'q')
return [A(x.list()).add_bigoh(R) for x in I.gens()]
def modular_ratio_to_prec(chi, qexp, prec):
r"""
Given a q-expansion of a modular ratio up to sufficient precision to
determine it uniquely, compute it to greater precision.
EXAMPLES::
sage: from sage.modular.modform.weight1 import modular_ratio_to_prec
sage: R.<q> = QQ[[]]
sage: modular_ratio_to_prec(DirichletGroup(31,QQ).0, q-q^2-q^5-q^7+q^8+O(q^9), 20)
q - q^2 - q^5 - q^7 + q^8 + q^9 + q^10 + q^14 - q^16 - q^18 - q^19 + O(q^20)
"""
if prec <= qexp.prec():
return qexp.add_bigoh(prec)
from sage.modular.modform.constructor import EisensteinForms, CuspForms
C = CuspForms(chi.level(), 2, base_ring=qexp.base_ring())
B = EisensteinForms(~chi, 1).gen(0).qexp(prec)
qexp = qexp.add_bigoh(C.sturm_bound())
fB = qexp * B
fB_elt = C(fB, check=False)
return fB_elt.qexp(prec) / B
@cached_function
def hecke_stable_subspace(chi, aux_prime=ZZ(2)):
r"""
Compute a q-expansion basis for S_1(chi).
Results are returned as q-expansions to a certain fixed (and fairly high)
precision. If more precision is required this can be obtained with
:func:`modular_ratio_to_prec`.
EXAMPLES::
sage: from sage.modular.modform.weight1 import hecke_stable_subspace
sage: hecke_stable_subspace(DirichletGroup(59, QQ).0)
[q - q^3 + q^4 - q^5 - q^7 - q^12 + q^15 + q^16 + 2*q^17 - q^19 - q^20 + q^21 + q^27 - q^28 - q^29 + q^35 + O(q^40)]
"""
# Deal quickly with the easy cases.
if chi(-1) == 1: return []
N = chi.modulus()
H = chi.kernel()
G = GammaH(N, H)
try:
if ArithmeticSubgroup.dimension_cusp_forms(G, 1) == 0:
verbose("no wt 1 cusp forms for N=%s, chi=%s by Riemann-Roch" % (N, chi._repr_short_()), level=1)
return []
except NotImplementedError:
pass
from sage.modular.modform.constructor import EisensteinForms
chi = chi.minimize_base_ring()
K = chi.base_ring()
# Auxiliary prime for Hecke stability method
l = aux_prime
while l.divides(N): l = l.next_prime()
verbose("Auxilliary prime: %s" % l, level=1)
# Compute working precision
R = l*Gamma0(N).sturm_bound(l + 2)
t=verbose("Computing modular ratio space", level=1)
mrs = modular_ratio_space(chi)
t=verbose("Computing modular ratios to precision %s" % R, level=1)
qexps = [modular_ratio_to_prec(chi, f, R) for f in mrs]
verbose("Done", t=t, level=1)
# We want to compute the largest subspace of I stable under T_l. To do
# this, we compute I intersect T_l(I) modulo q^(R/l), and take its preimage
# under T_l, which is then well-defined modulo q^R.
from sage.modular.modform.hecke_operator_on_qexp import hecke_operator_on_qexp
t = verbose("Computing Hecke-stable subspace", level=1)
A = PowerSeriesRing(K, 'q')
r = R // l
V = K**R
W = K**r
Tl_images = [hecke_operator_on_qexp(f, l, 1, chi) for f in qexps]
qvecs = [V(x.padded_list(R)) for x in qexps]
qvecs_trunc = [W(x.padded_list(r)) for x in qexps]
Tvecs = [W(x.padded_list(r)) for x in Tl_images]
I = V.submodule(qvecs)
Iimage = W.span(qvecs_trunc)
TlI = W.span(Tvecs)
Jimage = Iimage.intersection(TlI)
J = I.Hom(W)(Tvecs).inverse_image(Jimage)
verbose("Hecke-stable subspace is %s-dimensional" % J.dimension(), t=t, level=1)
if J.rank() == 0: return []
# The theory does not guarantee that J is exactly S_1(chi), just that it is
# intermediate between S_1(chi) and M_1(chi). In every example I know of,
# it is equal to S_1(chi), but just for honesty, we check this anyway.
t=verbose("Checking cuspidality", level=1)
JEis = V.span(V(x.padded_list(R)) for x in EisensteinForms(chi, 1).q_echelon_basis(prec=R))
D = JEis.intersection(J)
if D.dimension() != 0:
raise ArithmeticError("Got non-cuspidal form!")
verbose("Done", t=t, level=1)
qexps = Sequence(A(x.list()).add_bigoh(R) for x in J.gens())
return qexps
@cached_function
def dimension_wt1_cusp_forms(chi):
r"""
Return the dimension of the space of cusp forms of weight 1 and character chi.
EXAMPLES::
sage: chi = DirichletGroup(59, QQ).0
sage: sage.modular.modform.weight1.dimension_wt1_cusp_forms(chi)
1
"""
return len(hecke_stable_subspace(chi))
@cached_function
def dimension_wt1_cusp_forms_gH(group):
r"""
Return the dimension of the space of cusp forms of weight 1 for the given
group (which should be of GammaH type). Computed by summing over Galois
orbits of characters modulo H.
EXAMPLES::
sage: sage.modular.modform.weight1.dimension_wt1_cusp_forms_gH(GammaH(31, [7]))
1
"""
chis = [g.minimize_base_ring() for g in group.characters_mod_H(galois_orbits=True)]
return sum(dimension_wt1_cusp_forms(chi) * chi.base_ring().degree() for chi in chis)
|
the-stack_0_23395 | #!/usr/bin/env python3
import argparse
import sys
import pitchfork
from pitchfork import angr, funcEntryState, _spectreSimgr
from abstractdata import publicValue, secretValue, pointerTo, pointerToUnconstrainedPublic, publicArray, secretArray, array, struct
import logging
l = logging.getLogger(__name__)
l.setLevel(logging.INFO)
def c_donna(args, generating_fname=False):
parser = argparse.ArgumentParser('c_donna')
args = parser.parse_args(args)
if generating_fname:
return ''
proj = angr.Project('fact-eval/c_donna')
state = funcEntryState(proj, "curve25519_donna", [
("mypublic", pointerTo(secretArray(32), 32)),
("_secret", pointerTo(secretArray(32), 32)),
("basepoint", pointerTo(publicArray(32), 32)),
])
return proj, state, "C donna", None
def fact_donna(args, generating_fname=False):
parser = argparse.ArgumentParser('fact_donna')
args = parser.parse_args(args)
if generating_fname:
return ''
# this is O2 optimized fact
proj = angr.Project('fact-eval/fact_donna')
state = funcEntryState(proj, "curve25519_donna", [
("mypublic", pointerTo(secretArray(32), 32)),
("_secret", pointerTo(secretArray(32), 32)),
("basepoint", pointerTo(publicArray(32), 32)),
])
return proj, state, "FaCT donna", None
def c_ssl3(args, generating_fname=False):
parser = argparse.ArgumentParser('c_ssl3')
args = parser.parse_args(args)
if generating_fname:
return ''
proj = angr.Project('fact-eval/c_s3_cbc.O3')
thing = publicValue(value=4)
ctx_struct = [
pointerTo(thing),
publicArray(256),
]
ctx = struct(ctx_struct)
state = funcEntryState(proj, "ssl3_cbc_digest_record", [
("ctx", pointerTo(ctx)),
("md_out", pointerTo(secretArray(64), 64)),
("md_out_size", publicValue(64)),
("header", pointerTo(secretArray(16), 16)),
("data", pointerTo(secretArray(256), 256)), # XXX should be unconstrained
("data_plus_mac_size", secretValue()),
("data_plus_mac_plus_padding_size", publicValue(256)),
("mac_secret", pointerTo(secretArray(32), 32)),
("mac_secret_length", publicValue(32)),
("is_sslv3", publicValue(0, bits=8)),
])
# XXX add constraints
return proj, state, "C ssl3", None, '0011001000100010101000'
def fact_ssl3(args, generating_fname=False):
parser = argparse.ArgumentParser('fact_ssl3')
args = parser.parse_args(args)
if generating_fname:
return ''
proj = angr.Project('fact-eval/fact_s3_cbc.O3')
state = funcEntryState(proj, "__ssl3_cbc_digest_record", [
("md_state", pointerTo(secretArray(216), 216)),
("mac_out", pointerTo(secretArray(64), 64)),
("basepoint", pointerTo(publicArray(32), 32)),
("header", pointerTo(secretArray(16), 16)),
("__header_len", publicValue(16)),
("data", pointerTo(secretArray(256), 256)), # XXX should be unconstrained
("__data_len", publicValue(256)),
("data_plus_mac_size", secretValue()),
])
# XXX add constraints
return proj, state, "FaCT ssl3", None
class AesStub(angr.SimProcedure):
def run(self, in_, out, len_, key, iv, enc):
l.info("stubbing out a call to aesni_cbc_encrypt")
return
def c_mee(args, generating_fname=False):
if generating_fname:
return ''
binary = 'fact-eval/c_mee.O3'
declassified_load = 0x516c5f
proj = angr.Project(binary)
proj.hook_symbol("aesni_cbc_encrypt", AesStub())
aes_key_ks = [
[secretValue(bits=32) for _ in range(60)], # 0..ef
publicValue(bits=32), # f0..f3
]
sha_ctx_head = [
[secretValue(bits=32) for _ in range(5)], # f4..107
publicValue(bits=32), # 108..10b
publicValue(bits=32), # 10c..10f
secretArray(64), # 110..14f
publicValue(bits=32), # 150..153
]
sha_ctx_tail = [
[secretValue(bits=32) for _ in range(5)], # 154..167
publicValue(bits=32), # 168..16b
publicValue(bits=32), # 16c..16f
secretArray(64), # 170..1af
publicValue(bits=32), # 1b0..1b3
]
sha_ctx_md = [
[secretValue(bits=32) for _ in range(5)], # 1b4
publicValue(bits=32),
publicValue(bits=32),
secretArray(64),
publicValue(bits=32),
]
evp_aes_hmac_sha1 = [
aes_key_ks,
sha_ctx_head,
sha_ctx_tail,
sha_ctx_md,
publicValue(bits=32), # [pad] 214..217
publicValue(13, bits=64), # 218..21f
[publicValue(bits=8) for _ in range(16)],
#[publicValue(bits=8) for _ in range(9)] + [publicValue(0x0302, bits=16), publicValue(bits=16)],
secretArray(16),
]
evp_cipher_ctx_st = [
pointerToUnconstrainedPublic(), # cipher
pointerToUnconstrainedPublic(), # engine
publicValue(0, bits=32), # encrypt
publicValue(bits=32), # buf_len
publicArray(16), # oiv
publicArray(16), # iv
publicArray(32), # buf
publicValue(bits=32), # num
publicValue(bits=32), # [padding]
pointerToUnconstrainedPublic(), # app_data
publicValue(bits=32), # key_len
publicValue(bits=32), # [padding]
publicValue(bits=64), # flags
pointerTo(struct(evp_aes_hmac_sha1)), # cipher_data
publicValue(bits=32), # final_used
publicValue(bits=32), # block_mask
publicArray(32), # final
]
ctx = struct(evp_cipher_ctx_st)
state = funcEntryState(proj, "aesni_cbc_hmac_sha1_cipher", [
("ctx", pointerTo(ctx)),
("out", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("in", pointerTo(publicArray(1024), 1024)), # XXX should be unconstrained
("len", publicValue(1024, bits=64)),
])
return proj, state, "mee", [declassified_load], '00110010011001111011'
def fact_mee(args, generating_fname=False):
parser = argparse.ArgumentParser('fact_mee')
parser.add_argument('--unopt', action='store_true')
args = parser.parse_args(args)
if generating_fname:
fname = ''
argsd= dict(vars(args))
for arg in sorted(argsd):
val = argsd[arg]
sarg = ''
if arg == 'unopt':
if val:
sarg = 'unopt'
else:
sarg = 'O3'
elif arg == 'mod':
if val:
sarg = 'mod'
else:
sarg = arg_to_fname(arg, val)
if sarg:
fname += '.' + sarg
return fname
print(args, flush=True)
binary = 'fact-eval/fact_mee'
declassified_load = 0x401cf3
if not args.unopt:
binary += '.O3'
declassified_load = 0x401854
proj = angr.Project(binary)
aes_key_ks = [
[secretValue(bits=32) for _ in range(60)], # 0..ef
publicValue(bits=32), # f0..f3
]
sha_ctx_head = [
[secretValue(bits=32) for _ in range(5)], # f4..107
publicValue(bits=32), # 108..10b
publicValue(bits=32), # 10c..10f
secretArray(64), # 110..14f
publicValue(bits=32), # 150..153
]
sha_ctx_tail = [
[secretValue(bits=32) for _ in range(5)], # 154..167
publicValue(bits=32), # 168..16b
publicValue(bits=32), # 16c..16f
secretArray(64), # 170..1af
publicValue(bits=32), # 1b0..1b3
]
sha_ctx_md = [
[secretValue(bits=32) for _ in range(5)], # 1b4..1c7
publicValue(bits=32), # 1c8..1cb
publicValue(bits=32), # 1cc..1cf
secretArray(64), # 1d0..20f
publicValue(bits=32), # 210..213
]
evp_aes_hmac_sha1 = [
aes_key_ks,
sha_ctx_head,
sha_ctx_tail,
sha_ctx_md,
publicValue(bits=64), # 218
secretArray(16),
]
evp_aes_hmac_sha1 = struct(evp_aes_hmac_sha1)
state = funcEntryState(proj, "_aesni_cbc_hmac_sha1_cipher", [
("iv", pointerTo(publicArray(16), 16)),
("key", pointerTo(evp_aes_hmac_sha1)),
("out", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("out_len", publicValue(1024, bits=64)),
("in", pointerTo(publicArray(1024), 1024)), # XXX should be unconstrained
("in_len", publicValue(1024, bits=64)),
("tls_ver", publicValue(0x0302, bits=16)),
])
return proj, state, "mee", [declassified_load]
def c_secretbox(args, generating_fname=False):
parser = argparse.ArgumentParser('c_secretbox')
parser.add_argument('--asm', action='store_true')
parser.add_argument('--open', action='store_true')
args = parser.parse_args(args)
if generating_fname:
fname = ''
argsd= dict(vars(args))
if argsd['open']:
fname += '_open'
del argsd['open']
argsd['opt'] = 'O2'
for arg in sorted(argsd):
val = argsd[arg]
sarg = ''
if arg == 'asm':
if val:
sarg = 'asm'
else:
sarg = 'cref'
else:
sarg = arg_to_fname(arg, val)
if sarg:
fname += '.' + sarg
return fname
print(args, flush=True)
binary = 'fact-eval/c_secretbox'
if args.asm:
binary += '.asm'
else:
binary += '.cref'
binary += '.O2'
proj = angr.Project(binary)
fname = 'crypto_secretbox'
path = ''
declassified_verify_branch = []
if args.open:
fname += '_open'
params = [
("m", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("c", pointerTo(publicArray(1024), 1024)), # XXX should be unconstrained
("clen", publicValue(1024, bits=64)),
]
declassified_verify_branch = 0x401d80
path = '1011001000100010101000'
else:
params = [
("c", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("m", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("mlen", publicValue(1024, bits=64)),
]
if args.asm:
path = '0100000000011001011001000100010101000'
else:
path = '01000000000110110010011001111011'
params += [
("n", pointerTo(publicArray(24), 24)),
("k", pointerTo(secretArray(32), 32)),
]
state = funcEntryState(proj, fname, params)
return proj, state, fname, [declassified_verify_branch], path
def fact_secretbox(args, generating_fname=False):
parser = argparse.ArgumentParser('fact_secretbox')
parser.add_argument('--asm', action='store_true')
parser.add_argument('--unopt', action='store_true')
parser.add_argument('--open', action='store_true')
args = parser.parse_args(args)
if generating_fname:
fname = ''
argsd= dict(vars(args))
if argsd['open']:
fname += '_open'
del argsd['open']
for arg in sorted(argsd):
val = argsd[arg]
sarg = ''
if arg == 'unopt':
if val:
sarg = 'unopt'
else:
sarg = 'O2'
elif arg == 'asm':
if val:
sarg = 'asm'
else:
sarg = 'cref'
else:
sarg = arg_to_fname(arg, val)
if sarg:
fname += '.' + sarg
return fname
print(args, flush=True)
binary = 'fact-eval/fact_secretbox'
if args.asm:
binary += '.asm'
else:
binary += '.cref'
if not args.unopt:
binary += '.O2'
proj = angr.Project(binary)
if args.unopt:
if not args.asm:
declassified_verify_branch = 0x403075
else:
declassified_verify_branch = 0x404095
else:
if not args.asm:
declassified_verify_branch = 0x4020be
else:
declassified_verify_branch = 0x40237e
fname = '_crypto_secretbox'
if args.open:
fname += '_open'
params = [
("m", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("m_len", publicValue(1024, bits=64)),
("c", pointerTo(publicArray(1024), 1024)), # XXX should be unconstrained
("c_len", publicValue(1024, bits=64)),
]
else:
params = [
("c", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("c_len", publicValue(1024, bits=64)),
("m", pointerTo(secretArray(1024), 1024)), # XXX should be unconstrained
("m_len", publicValue(1024, bits=64)),
]
params += [
("n", pointerTo(publicArray(24), 24)),
("k", pointerTo(secretArray(32), 32)),
]
state = funcEntryState(proj, fname, params)
return proj, state, fname, [declassified_verify_branch]
def arg_to_fname(arg, val):
sarg = ''
if isinstance(val, bool):
if not val:
sarg = 'no-'
sarg += arg
elif isinstance(val, int):
if val is not None:
sarg = arg
sarg += str(val)
return sarg
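# Illustrative behaviour of arg_to_fname (example calls, not present in the original):
#
#   arg_to_fname('misforwarding', True)   # -> 'misforwarding'
#   arg_to_fname('misforwarding', False)  # -> 'no-misforwarding'
#   arg_to_fname('window', 250)           # -> 'window250'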
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--trace', action='store_true')
parser.add_argument('--spec', action='store_true')
parser.add_argument('--window', type=int, action='store', default=250)
parser.add_argument('--misforwarding', action='store_true')
parser.add_argument('--generating-filename', action='store_true')
parser.add_argument('--guided', action='store_true')
parser.add_argument('test')
args, remaining_args = parser.parse_known_args()
generating_filename = args.generating_filename
if generating_filename:
del vars(args)['generating_filename']
argsd= dict(vars(args))
del argsd['test']
del argsd['trace']
fname = args.test
fname += globals()[args.test](remaining_args, generating_fname=True)
for arg in sorted(argsd):
val = argsd[arg]
sarg = ''
if arg == 'misforwarding':
if args.spec:
sarg = 'misfwd' if val else 'basicfwd'
elif arg == 'spec':
if val:
sarg = 'spec'
else:
sarg = 'ct'
elif arg == 'window':
if args.spec:
sarg = arg_to_fname(arg, val)
elif arg == 'guided':
continue
else:
sarg = arg_to_fname(arg, val)
if sarg:
fname += '.' + sarg
if args.trace:
fname += '.trace'
print(fname)
exit(0)
print(args, flush=True)
rvals = globals()[args.test](remaining_args)
proj = rvals[0]
state = rvals[1]
fname = rvals[2]
whitelist = rvals[3]
path = ''
if args.guided:
if len(rvals) > 4:
path = rvals[4]
if not path:
print('no guiding path for this test case', file=sys.stderr)
exit(1)
_spectreSimgr(lambda: (proj, state), [], fname, "explicit", spec=args.spec, misforwarding=args.misforwarding, whitelist=whitelist, window=args.window, trace=args.trace, takepath=path)
|
the-stack_0_23396 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database.
"""
from __future__ import division, absolute_import, print_function
import musicbrainzngs
import re
import traceback
from six.moves.urllib.parse import urljoin
from beets import logging
import beets.autotag.hooks
import beets
from beets import util
from beets import config
import six
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
if util.SNI_SUPPORTED:
BASE_URL = 'https://musicbrainz.org/'
else:
BASE_URL = 'http://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]']
musicbrainzngs.set_useragent('beets', beets.__version__,
'https://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type.
"""
def __init__(self, reason, verb, query, tb=None):
self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError):
reason = u'MusicBrainz not reachable'
super(MusicBrainzAPIError, self).__init__(reason, verb, tb)
def get_message(self):
return u'{0} in {1} with query {2}'.format(
self._reasonstr(), self.verb, repr(self.query)
)
log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels']
TRACK_INCLUDES = ['artists', 'aliases']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
def track_url(trackid):
return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid):
return urljoin(BASE_URL, 'release/' + albumid)
def configure():
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
"""
hostname = config['musicbrainz']['host'].as_str()
musicbrainzngs.set_hostname(hostname)
musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int),
)
def _preferred_alias(aliases):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
"""
if not aliases:
return
# Only consider aliases that have locales set.
aliases = [a for a in aliases if 'locale' in a]
# Search configured locales in order.
for locale in config['import']['languages'].as_str_seq():
# Find matching primary aliases for this locale.
matches = [a for a in aliases
if a['locale'] == locale and 'primary' in a]
# Skip to the next locale if we have no matches
if not matches:
continue
return matches[0]
def _preferred_release_event(release):
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
countries = config['match']['preferred']['countries'].as_str_seq()
for country in countries:
for event in release.get('release-event-list', {}):
try:
if country in event['area']['iso-3166-1-code-list']:
return country, event['date']
except KeyError:
pass
return release.get('country'), release.get('date')
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
"""
artist_parts = []
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, six.string_types):
# Join phrase.
artist_parts.append(el)
artist_credit_parts.append(el)
artist_sort_parts.append(el)
else:
alias = _preferred_alias(el['artist'].get('alias-list', ()))
# An artist.
if alias:
cur_artist_name = alias['alias']
else:
cur_artist_name = el['artist']['name']
artist_parts.append(cur_artist_name)
# Artist sort name.
if alias:
artist_sort_parts.append(alias['sort-name'])
elif 'sort-name' in el['artist']:
artist_sort_parts.append(el['artist']['sort-name'])
else:
artist_sort_parts.append(cur_artist_name)
# Artist credit.
if 'name' in el:
artist_credit_parts.append(el['name'])
else:
artist_credit_parts.append(cur_artist_name)
return (
''.join(artist_parts),
''.join(artist_sort_parts),
''.join(artist_credit_parts),
)
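# Illustrative input/output for _flatten_artist_credit (hypothetical credit data):
#
#   credit = [{'artist': {'name': u'Foo', 'sort-name': u'Foo'}}, u' feat. ',
#             {'artist': {'name': u'Bar', 'sort-name': u'Bar'}}]
#   _flatten_artist_credit(credit)
#   # -> (u'Foo feat. Bar', u'Foo feat. Bar', u'Foo feat. Bar')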
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
recording['title'],
recording['id'],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source=u'MusicBrainz',
data_url=track_url(recording['id']),
)
if recording.get('artist-credit'):
# Get the artist names.
info.artist, info.artist_sort, info.artist_credit = \
_flatten_artist_credit(recording['artist-credit'])
# Get the ID and sort name of the first artist.
artist = recording['artist-credit'][0]['artist']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
info.decode()
return info
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
if original:
key = 'original_' + key
setattr(info, key, date_num)
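# Example: a partial date string such as u'2001-07' sets info.year = 2001 and
# info.month = 7; with original=True the same values land in info.original_year
# and info.original_month instead (info is assumed to be an AlbumInfo instance).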
def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
# Get artist name using join phrases.
artist_name, artist_sort_name, artist_credit_name = \
_flatten_artist_credit(release['artist-credit'])
# Basic info.
track_infos = []
index = 0
for medium in release['medium-list']:
disctitle = medium.get('title')
format = medium.get('format')
if format in config['match']['ignored_media'].as_str_seq():
continue
all_tracks = medium['track-list']
if ('data-track-list' in medium
and not config['match']['ignore_data_tracks']):
all_tracks += medium['data-track-list']
track_count = len(all_tracks)
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
if ('title' in track['recording'] and
track['recording']['title'] in SKIPPED_TRACKS):
continue
if ('video' in track['recording'] and
track['recording']['video'] == 'true' and
config['match']['ignore_video_tracks']):
continue
# Basic information from the recording.
index += 1
ti = track_info(
track['recording'],
index,
int(medium['position']),
int(track['position']),
track_count,
)
ti.release_track_id = track['id']
ti.disctitle = disctitle
ti.media = format
ti.track_alt = track['number']
# Prefer track data, where present, over recording data.
if track.get('title'):
ti.title = track['title']
if track.get('artist-credit'):
# Get the artist names.
ti.artist, ti.artist_sort, ti.artist_credit = \
_flatten_artist_credit(track['artist-credit'])
ti.artist_id = track['artist-credit'][0]['artist']['id']
if track.get('length'):
ti.length = int(track['length']) / (1000.0)
track_infos.append(ti)
info = beets.autotag.hooks.AlbumInfo(
release['title'],
release['id'],
artist_name,
release['artist-credit'][0]['artist']['id'],
track_infos,
mediums=len(release['medium-list']),
artist_sort=artist_sort_name,
artist_credit=artist_credit_name,
data_source=u'MusicBrainz',
data_url=album_url(release['id']),
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config['va_name'].as_str()
info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id']
info.albumstatus = release.get('status')
# Get the disambiguation strings at the release and release group level.
if release['release-group'].get('disambiguation'):
info.releasegroupdisambig = \
release['release-group'].get('disambiguation')
if release.get('disambiguation'):
info.albumdisambig = release.get('disambiguation')
# Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Log the new-style "primary" and "secondary" release types.
# Eventually, we'd like to actually store this data, but we just log
# it for now to help understand the differences.
if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type']
if rel_primarytype:
log.debug('primary MB release type: ' + rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']:
log.debug('secondary MB release type(s): ' + ', '.join(
[secondarytype.lower() for secondarytype in
release['release-group']['secondary-type-list']]))
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release['release-group'].get('first-release-date')
if not release_date:
# Fall back if release-specific date is not available.
release_date = release_group_date
_set_date_str(info, release_date, False)
_set_date_str(info, release_group_date, True)
# Label name.
if release.get('label-info-list'):
label_info = release['label-info-list'][0]
if label_info.get('label'):
label = label_info['label']['name']
if label != '[no label]':
info.label = label
info.catalognum = label_info.get('catalog-number')
# Text representation data.
if release.get('text-representation'):
rep = release['text-representation']
info.script = rep.get('script')
info.language = rep.get('language')
# Media (format).
if release['medium-list']:
first_medium = release['medium-list'][0]
info.media = first_medium.get('format')
info.decode()
return info
def match_album(artist, album, tracks=None):
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError.
The query consists of an artist name, an album name, and,
optionally, a number of tracks on the album.
"""
# Build search criteria.
criteria = {'release': album.lower().strip()}
if artist is not None:
criteria['artist'] = artist.lower().strip()
else:
# Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None:
criteria['tracks'] = six.text_type(tracks)
# Abort if we have no search terms.
if not any(criteria.values()):
return
try:
log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'release search', criteria,
traceback.format_exc())
for release in res['release-list']:
# The search result is missing some data (namely, the tracks),
# so we just use the ID and fetch the rest of the information.
albuminfo = album_for_id(release['id'])
if albuminfo is not None:
yield albuminfo
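# Sketch of a typical lookup (the artist/album strings are placeholders):
#
#   for album_info in match_album(u'Some Artist', u'Some Album', tracks=12):
#       print(album_info.album, album_info.album_id)
#
# Each search hit is re-fetched through album_for_id(), so the yielded AlbumInfo
# objects carry full track listings rather than the sparse search results.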
def match_track(artist, title):
"""Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError.
"""
criteria = {
'artist': artist.lower().strip(),
'recording': title.lower().strip(),
}
if not any(criteria.values()):
return
try:
res = musicbrainzngs.search_recordings(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'recording search', criteria,
traceback.format_exc())
for recording in res['recording-list']:
yield track_info(recording)
def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
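# Example: both a bare MBID and a full URL yield the embedded UUID
# (the hex digits below are placeholders, not a real release):
#
#   _parse_id(u'https://musicbrainz.org/release/01234567-89ab-cdef-0123-456789abcdef')
#   # -> u'01234567-89ab-cdef-0123-456789abcdef'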
def album_for_id(releaseid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
"""
log.debug(u'Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid)
if not albumid:
log.debug(u'Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug(u'Album ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get release by ID', albumid,
traceback.format_exc())
return album_info(res['release'])
def track_for_id(releaseid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""
trackid = _parse_id(releaseid)
if not trackid:
log.debug(u'Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug(u'Track ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get recording by ID', trackid,
traceback.format_exc())
return track_info(res['recording'])
|
the-stack_0_23397 | #!/usr/bin/env python3
#
# Copyright (c) 2019-2020 Mike's Pub, see https://github.com/mikespub-org
# Licensed under the MIT license: https://opensource.org/licenses/mit-license.php
#
"""Basic support of Google Cloud Datastore as filesystem with PyFilesystem2
Example opening directly with DatastoreFS():
>>> from datastore_fs import DatastoreFS
>>> data_fs = DatastoreFS()
>>> data_fs.listdir("/")
Example opening via a FS URL "datastore://"
>>> import fs
>>> import datastore_fs # not registered by default, so we need to import first
>>> data_fs = fs.open_fs("datastore://")
>>> data_fs.listdir("/")
For more information on PyFilesystem2, see https://docs.pyfilesystem.org/
"""
import datetime
import io
import itertools
import logging
from functools import partial
from fs import errors
from fs.base import FS
from fs.info import Info
from fs.iotools import RawWrapper
from fs.mode import Mode
# for opener
from fs.opener import Opener, open_fs, registry
from fs.path import join, split
from fs.wrapfs import WrapFS
# use the datastore fs module here
from . import fs as data_fs
# TODO: replace with more advanced IO class - see e.g. _MemoryFile in fs.memoryfs
# from .fs import BtIO
#
# Specify location of your service account credentials in environment variable before you start:
#
# $ export GOOGLE_APPLICATION_CREDENTIALS="~/datastore-user.cred.json"
#
# See https://cloud.google.com/docs/authentication/getting-started for details...
#
# Or specify in startup script or .env file elsewere:
# import os
# os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "~/datastore-user.cred.json"
#
__all__ = ["DatastoreFS", "WrapDatastoreFS"]
log = logging.getLogger(__name__)
class DatastoreFS(FS):
def __init__(self, root_path=None, use_cache=True):
# self._meta = {}
super().__init__()
if root_path is None:
root_path = "/_datastore_fs_"
_root_path = self.validatepath(root_path)
if not _root_path.startswith("/"):
_root_path = "/" + _root_path
self._is_cached = True
if not use_cache:
self._stop_cache(True)
# Initialize Datastore filesystem if needed
data_fs.initfs()
# Check if the requested root_path exists
_res = data_fs._getresource(_root_path)
if _res:
if _res.isdir():
log.info("Root path exists %s" % _root_path)
else:
raise errors.DirectoryExpected(root_path)
else:
log.info("Creating root path %s" % _root_path)
_res = data_fs.mkdir(_root_path)
log.info("Resource: %s" % _res)
self.root_path = _root_path
self.root_res = _res
# https://docs.pyfilesystem.org/en/latest/implementers.html#essential-methods
# From https://github.com/PyFilesystem/pyfilesystem2/blob/master/fs/base.py
# ---------------------------------------------------------------- #
# Required methods #
# Filesystems must implement these methods. #
# ---------------------------------------------------------------- #
def getinfo(self, path, namespaces=None):
# type: (Text, Optional[Collection[Text]]) -> Info
"""Get information about a resource on a filesystem.
Arguments:
path (str): A path to a resource on the filesystem.
namespaces (list, optional): Info namespaces to query
(defaults to *[basic]*).
Returns:
~fs.info.Info: resource information object.
For more information regarding resource information, see :ref:`info`.
"""
namespaces = namespaces or ()
_res = self._getresource(path)
if _res is None:
raise errors.ResourceNotFound(path)
return self._make_info_from_resource(_res, namespaces)
def listdir(self, path):
# type: (Text) -> List[Text]
"""Get a list of the resource names in a directory.
This method will return a list of the resources in a directory.
A *resource* is a file, directory, or one of the other types
defined in `~fs.enums.ResourceType`.
Arguments:
path (str): A path to a directory on the filesystem
Returns:
list: list of names, relative to ``path``.
Raises:
fs.errors.DirectoryExpected: If ``path`` is not a directory.
fs.errors.ResourceNotFound: If ``path`` does not exist.
"""
with self._lock:
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if not _res.isdir():
raise errors.DirectoryExpected(path)
return _res.listdir()
def makedir(
self,
path, # type: Text
permissions=None, # type: Optional[Permissions]
recreate=False, # type: bool
):
# type: (...) -> SubFS[FS]
"""Make a directory.
Arguments:
path (str): Path to directory from root.
permissions (~fs.permissions.Permissions, optional): a
`Permissions` instance, or `None` to use default.
recreate (bool): Set to `True` to avoid raising an error if
the directory already exists (defaults to `False`).
Returns:
~fs.subfs.SubFS: a filesystem whose root is the new directory.
Raises:
fs.errors.DirectoryExists: If the path already exists.
fs.errors.ResourceNotFound: If the path is not found.
"""
# mode = Permissions.get_mode(permissions)
_path = self.validatepath(path)
with self._lock:
if _path == "/":
if recreate:
return self.opendir(path)
else:
raise errors.DirectoryExists(path)
if _path.endswith("/"):
_path = _path[:-1]
dir_path, dir_name = split(_path)
_dir_res = self._getresource(dir_path)
if not _dir_res or not _dir_res.isdir():
raise errors.ResourceNotFound(path)
if dir_name in _dir_res.listdir():
if not recreate:
raise errors.DirectoryExists(path)
_res = self._getresource(path)
if _res and _res.isdir():
return self.opendir(path)
_res = data_fs.mkdir(self._prep_path(_path))
return self.opendir(path)
def openbin(
self,
path, # type: Text
mode="r", # type: Text
buffering=-1, # type: int
**options, # type: Any
):
# type: (...) -> BinaryIO
"""Open a binary file-like object.
Arguments:
path (str): A path on the filesystem.
mode (str): Mode to open file (must be a valid non-text mode,
defaults to *r*). Since this method only opens binary files,
the ``b`` in the mode string is implied.
buffering (int): Buffering policy (-1 to use default buffering,
0 to disable buffering, or any positive integer to indicate
a buffer size).
**options: keyword arguments for any additional information
required by the filesystem (if any).
Returns:
io.IOBase: a *file-like* object.
Raises:
fs.errors.FileExpected: If the path is not a file.
fs.errors.FileExists: If the file exists, and *exclusive mode*
is specified (``x`` in the mode).
fs.errors.ResourceNotFound: If the path does not exist.
"""
_mode = Mode(mode)
_mode.validate_bin()
_path = self.validatepath(path)
dir_path, file_name = split(_path)
if not file_name:
raise errors.FileExpected(path)
with self._lock:
_dir_res = self._getresource(dir_path)
if not _dir_res or not _dir_res.isdir():
raise errors.ResourceNotFound(path)
if _mode.create:
if file_name in _dir_res.listdir():
if _mode.exclusive:
raise errors.FileExists(path)
_res = self._getresource(path)
if not _res or not _res.isfile():
raise errors.FileExpected(path)
return self._btopen(_res, mode)
return self._btopen(self._prep_path(_path), mode)
if file_name not in _dir_res.listdir():
raise errors.ResourceNotFound(path)
_res = self._getresource(path)
if not _res or not _res.isfile():
raise errors.FileExpected(path)
return self._btopen(_res, mode)
def remove(self, path):
# type: (Text) -> None
"""Remove a file from the filesystem.
Arguments:
path (str): Path of the file to remove.
Raises:
fs.errors.FileExpected: If the path is a directory.
fs.errors.ResourceNotFound: If the path does not exist.
"""
with self._lock:
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if not _res.isfile():
raise errors.FileExpected(path)
_res.delete()
def removedir(self, path):
# type: (Text) -> None
"""Remove a directory from the filesystem.
Arguments:
path (str): Path of the directory to remove.
Raises:
fs.errors.DirectoryNotEmpty: If the directory is not empty (
see `~fs.base.FS.removetree` for a way to remove the
directory contents.).
fs.errors.DirectoryExpected: If the path does not refer to
a directory.
fs.errors.ResourceNotFound: If no resource exists at the
given path.
fs.errors.RemoveRootError: If an attempt is made to remove
the root directory (i.e. ``'/'``)
"""
_path = self.validatepath(path)
if _path == "/" or _path == "" or _path is None:
raise errors.RemoveRootError()
if _path.endswith("/"):
_path = _path[:-1]
with self._lock:
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if not _res.isdir():
raise errors.DirectoryExpected(path)
if len(_res.listdir()) > 0:
raise errors.DirectoryNotEmpty(path)
_res.delete(recursive=False)
def setinfo(self, path, info):
# type: (Text, RawInfo) -> None
"""Set info on a resource.
This method is the complement to `~fs.base.FS.getinfo`
and is used to set info values on a resource.
Arguments:
path (str): Path to a resource on the filesystem.
info (dict): Dictionary of resource info.
Raises:
fs.errors.ResourceNotFound: If ``path`` does not exist
on the filesystem
The ``info`` dict should be in the same format as the raw
info returned by ``getinfo(file).raw``.
Example:
>>> details_info = {"details": {
... "modified": time.time()
... }}
>>> my_fs.setinfo('file.txt', details_info)
"""
with self._lock:
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if "details" in info:
details = info["details"]
if (
"accessed" in details
or "modified" in details
or "created" in details
):
accessed_time = int(details.get("accessed", 0))
modified_time = int(details.get("modified", 0))
created_time = int(details.get("created", 0))
if accessed_time and not modified_time:
modified_time = accessed_time
if created_time:
_res.create_time = datetime.datetime.fromtimestamp(
created_time, datetime.timezone.utc
)
if modified_time:
_res.modify_time = datetime.datetime.fromtimestamp(
modified_time, datetime.timezone.utc
)
_res.put()
# ---------------------------------------------------------------- #
# Optional methods #
# Filesystems *may* implement these methods. #
# ---------------------------------------------------------------- #
def exists(self, path):
# type: (Text) -> bool
"""Check if a path maps to a resource.
Arguments:
path (str): Path to a resource.
Returns:
bool: `True` if a resource exists at the given path.
"""
_res = self._getresource(path)
return _res is not None
def isdir(self, path):
# type: (Text) -> bool
"""Check if a path maps to an existing directory.
Parameters:
path (str): A path on the filesystem.
Returns:
bool: `True` if ``path`` maps to a directory.
"""
_res = self._getresource(path)
if not _res or not _res.isdir():
return False
return True
def isfile(self, path):
# type: (Text) -> bool
"""Check if a path maps to an existing file.
Parameters:
path (str): A path on the filesystem.
Returns:
bool: `True` if ``path`` maps to a file.
"""
_res = self._getresource(path)
if not _res or not _res.isfile():
return False
return True
def scandir(
self,
path, # type: Text
namespaces=None, # type: Optional[Collection[Text]]
page=None, # type: Optional[Tuple[int, int]]
):
# type: (...) -> Iterator[Info]
"""Get an iterator of resource info.
Arguments:
path (str): A path to a directory on the filesystem.
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``.
page (tuple, optional): May be a tuple of ``(<start>, <end>)``
indexes to return an iterator of a subset of the resource
info, or `None` to iterate over the entire directory.
Paging a directory scan may be necessary for very large
directories.
Returns:
~collections.abc.Iterator: an iterator of `Info` objects.
Raises:
fs.errors.DirectoryExpected: If ``path`` is not a directory.
fs.errors.ResourceNotFound: If ``path`` does not exist.
"""
namespaces = namespaces or ()
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if not _res.isdir():
raise errors.DirectoryExpected(path)
iter_info = self._scandir_from_resource(_res, namespaces)
if page is not None:
start, end = page
iter_info = itertools.islice(iter_info, start, end)
return iter_info
def todo_filterdir(
self,
path, # type: Text
files=None, # type: Optional[Iterable[Text]]
dirs=None, # type: Optional[Iterable[Text]]
exclude_dirs=None, # type: Optional[Iterable[Text]]
exclude_files=None, # type: Optional[Iterable[Text]]
namespaces=None, # type: Optional[Collection[Text]]
page=None, # type: Optional[Tuple[int, int]]
):
# type: (...) -> Iterator[Info]
"""Get an iterator of resource info, filtered by patterns.
This method enhances `~fs.base.FS.scandir` with additional
filtering functionality.
Arguments:
path (str): A path to a directory on the filesystem.
files (list, optional): A list of UNIX shell-style patterns
to filter file names, e.g. ``['*.py']``.
dirs (list, optional): A list of UNIX shell-style patterns
to filter directory names.
exclude_dirs (list, optional): A list of patterns used
to exclude directories.
exclude_files (list, optional): A list of patterns used
to exclude files.
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``.
page (tuple, optional): May be a tuple of ``(<start>, <end>)``
indexes to return an iterator of a subset of the resource
info, or `None` to iterate over the entire directory.
Paging a directory scan may be necessary for very large
directories.
Returns:
~collections.abc.Iterator: an iterator of `Info` objects.
"""
# TODO: apply filters directly in Dir.get_content() - see scandir()
resources = self.scandir(path, namespaces=namespaces)
filters = []
def match_dir(patterns, info):
# type: (Optional[Iterable[Text]], Info) -> bool
"""Pattern match info.name."""
return info.is_file or self.match(patterns, info.name)
def match_file(patterns, info):
# type: (Optional[Iterable[Text]], Info) -> bool
"""Pattern match info.name."""
return info.is_dir or self.match(patterns, info.name)
def exclude_dir(patterns, info):
# type: (Optional[Iterable[Text]], Info) -> bool
"""Pattern match info.name."""
return info.is_file or not self.match(patterns, info.name)
def exclude_file(patterns, info):
# type: (Optional[Iterable[Text]], Info) -> bool
"""Pattern match info.name."""
return info.is_dir or not self.match(patterns, info.name)
if files:
filters.append(partial(match_file, files))
if dirs:
filters.append(partial(match_dir, dirs))
if exclude_dirs:
filters.append(partial(exclude_dir, exclude_dirs))
if exclude_files:
filters.append(partial(exclude_file, exclude_files))
if filters:
resources = (
info for info in resources if all(_filter(info) for _filter in filters)
)
iter_info = iter(resources)
if page is not None:
start, end = page
iter_info = itertools.islice(iter_info, start, end)
return iter_info
def copy(self, src_path, dst_path, overwrite=False):
# type: (Text, Text, bool) -> None
"""Copy file contents from ``src_path`` to ``dst_path``.
Arguments:
src_path (str): Path of source file.
dst_path (str): Path to destination file.
overwrite (bool): If `True`, overwrite the destination file
if it exists (defaults to `False`).
Raises:
fs.errors.DestinationExists: If ``dst_path`` exists,
and ``overwrite`` is `False`.
fs.errors.ResourceNotFound: If a parent directory of
``dst_path`` does not exist.
"""
self.validatepath(src_path)
_dst_path = self.validatepath(dst_path)
with self._lock:
if not overwrite and self.exists(dst_path):
raise errors.DestinationExists(dst_path)
dir_path, file_name = split(_dst_path)
_dir_res = self._getresource(dir_path)
if not _dir_res or not _dir_res.isdir():
raise errors.ResourceNotFound(dst_path)
_src_res = self._getresource(src_path)
if not _src_res:
raise errors.ResourceNotFound(src_path)
if not _src_res.isfile():
raise errors.FileExpected(src_path)
data_fs.copyfile(_src_res, self._prep_path(_dst_path))
def move(self, src_path, dst_path, overwrite=False):
# type: (Text, Text, bool) -> None
"""Move a file from ``src_path`` to ``dst_path``.
Arguments:
src_path (str): A path on the filesystem to move.
dst_path (str): A path on the filesystem where the source
file will be written to.
overwrite (bool): If `True`, destination path will be
overwritten if it exists.
Raises:
fs.errors.FileExpected: If ``src_path`` maps to a
directory instead of a file.
fs.errors.DestinationExists: If ``dst_path`` exists,
and ``overwrite`` is `False`.
fs.errors.ResourceNotFound: If a parent directory of
``dst_path`` does not exist.
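        Example:
            >>> # illustrative sketch; assumes 'old.txt' exists on this filesystem
            >>> my_fs.move('old.txt', 'new.txt', overwrite=True)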
"""
# TODO: update parent key of chunk entities instead of copy & delete?
self.copy(src_path, dst_path, overwrite)
self.remove(src_path)
def create(self, path, wipe=False):
# type: (Text, bool) -> bool
"""Create an empty file.
The default behavior is to create a new file if one doesn't
already exist. If ``wipe`` is `True`, any existing file will
be truncated.
Arguments:
path (str): Path to a new file in the filesystem.
wipe (bool): If `True`, truncate any existing
file to 0 bytes (defaults to `False`).
Returns:
bool: `True` if a new file had to be created.
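        Example:
            >>> # illustrative sketch; assumes the parent directory already exists
            >>> my_fs.create('new.txt')
            True
            >>> my_fs.create('new.txt')
            False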
"""
with self._lock:
_res = self._getresource(path)
if _res:
if not _res.isfile():
raise errors.FileExpected(path)
if not wipe:
return False
_res.truncate(0)
else:
_path = self.validatepath(path)
dir_path, file_name = split(_path)
_dir_res = self._getresource(dir_path)
if not _dir_res or not _dir_res.isdir():
raise errors.ResourceNotFound(path)
_res = data_fs.mkfile(self._prep_path(_path))
return True
def readbytes(self, path):
# type: (Text) -> bytes
"""Get the contents of a file as bytes.
Arguments:
path (str): A path to a readable file on the filesystem.
Returns:
bytes: the file contents.
Raises:
            fs.errors.ResourceNotFound: if ``path`` does not exist.
            fs.errors.FileExpected: if ``path`` is not a file.
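        Example:
            >>> # illustrative sketch; assumes 'file.bin' exists on this filesystem
            >>> data = my_fs.readbytes('file.bin')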
"""
with self._lock:
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if not _res.isfile():
raise errors.FileExpected(path)
return _res.get_content()
def download(self, path, file, chunk_size=None, **options):
# type: (Text, BinaryIO, Optional[int], **Any) -> None
"""Copies a file from the filesystem to a file-like object.
        This may be more efficient than opening and copying files
manually if the filesystem supplies an optimized method.
Arguments:
path (str): Path to a resource.
file (file-like): A file-like object open for writing in
binary mode.
chunk_size (int, optional): Number of bytes to read at a
time, if a simple copy is used, or `None` to use
                a sensible default.
**options: Implementation specific options required to open
the source file.
Note that the file object ``file`` will *not* be closed by this
method. Take care to close it after this method completes
(ideally with a context manager).
Example:
>>> with open('starwars.mov', 'wb') as write_file:
... my_fs.download('/movies/starwars.mov', write_file)
"""
with self._lock:
_res = self._getresource(path)
if not _res:
raise errors.ResourceNotFound(path)
if not _res.isfile():
raise errors.FileExpected(path)
# Note: we always write in chunks here, regardless of the chunk_size
_res.download(file)
def writebytes(self, path, contents):
# type: (Text, bytes) -> None
# FIXME(@althonos): accept bytearray and memoryview as well ?
"""Copy binary data to a file.
Arguments:
path (str): Destination path on the filesystem.
contents (bytes): Data to be written.
Raises:
TypeError: if contents is not bytes.
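        Example:
            >>> # illustrative sketch; creates or overwrites 'file.bin'
            >>> my_fs.writebytes('file.bin', b'hello')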
"""
if not isinstance(contents, bytes):
raise TypeError("contents must be bytes")
with self._lock:
_res = self._getresource(path)
if _res:
if not _res.isfile():
raise errors.FileExpected(path)
_res.truncate(0)
else:
_path = self.validatepath(path)
dir_path, file_name = split(_path)
_dir_res = self._getresource(dir_path)
if not _dir_res or not _dir_res.isdir():
raise errors.ResourceNotFound(path)
_res = data_fs.mkfile(self._prep_path(_path))
_res.put_content(contents)
def upload(self, path, file, chunk_size=None, **options):
# type: (Text, BinaryIO, Optional[int], **Any) -> None
"""Set a file to the contents of a binary file object.
This method copies bytes from an open binary file to a file on
the filesystem. If the destination exists, it will first be
truncated.
Arguments:
path (str): A path on the filesystem.
file (io.IOBase): a file object open for reading in
binary mode.
chunk_size (int, optional): Number of bytes to read at a
time, if a simple copy is used, or `None` to use
                a sensible default.
**options: Implementation specific options required to open
the source file.
Note that the file object ``file`` will *not* be closed by this
method. Take care to close it after this method completes
(ideally with a context manager).
Example:
>>> with open('~/movies/starwars.mov', 'rb') as read_file:
... my_fs.upload('starwars.mov', read_file)
"""
with self._lock:
_res = self._getresource(path)
if _res:
if not _res.isfile():
raise errors.FileExpected(path)
_res.truncate(0)
else:
_path = self.validatepath(path)
dir_path, file_name = split(_path)
_dir_res = self._getresource(dir_path)
if not _dir_res or not _dir_res.isdir():
raise errors.ResourceNotFound(path)
_res = data_fs.mkfile(self._prep_path(_path))
# Note: we always read in chunks here, regardless of the chunk_size
_res.upload(file)
def close(self):
# type: () -> None
"""Close the filesystem and release any resources.
It is important to call this method when you have finished
working with the filesystem. Some filesystems may not finalize
changes until they are closed (archives for example). You may
call this method explicitly (it is safe to call close multiple
times), or you can use the filesystem as a context manager to
automatically close.
Example:
>>> with OSFS('~/Desktop') as desktop_fs:
... desktop_fs.writetext(
... 'note.txt',
... "Don't forget to tape Game of Thrones"
... )
If you attempt to use a filesystem that has been closed, a
`~fs.errors.FilesystemClosed` exception will be thrown.
"""
if not self._closed:
if hasattr(data_fs, "close") and callable(data_fs.close):
data_fs.close()
return super().close()
# ---------------------------------------------------------------- #
# Internal methods #
# Filesystem-specific methods. #
# ---------------------------------------------------------------- #
@staticmethod
def _make_info_from_resource(_res, namespaces):
def epoch(dt):
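            # Convert a timezone-aware datetime to seconds since the Unix epoch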
# return time.mktime(dt.utctimetuple())
return (
dt - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
) / datetime.timedelta(seconds=1)
st_size = _res.size
st_atime = epoch(_res.modify_time)
st_mtime = st_atime
st_ctime = epoch(_res.create_time)
info = {"basic": {"name": _res.basename(_res.path), "is_dir": _res.isdir()}}
if "details" in namespaces:
info["details"] = {
# "_write": ["accessed", "modified"],
"_write": ["created", "modified"],
"accessed": st_atime,
"modified": st_mtime,
"created": st_ctime,
"size": st_size,
# "type": int(cls._get_type_from_stat(stat_result)),
}
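            # The type codes below appear to mirror fs.enums.ResourceType
            # (1 == directory, 2 == file).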
if _res.isdir():
info["details"]["type"] = 1
else:
info["details"]["type"] = 2
if "stat" in namespaces:
info["stat"] = {
"st_size": st_size,
"st_atime": st_atime,
"st_mtime": st_mtime,
"st_ctime": st_ctime,
}
# if "lstat" in namespaces:
# info["lstat"] = {
# k: getattr(_lstat, k) for k in dir(_lstat) if k.startswith("st_")
# }
# if "link" in namespaces:
# info["link"] = cls._make_link_info(sys_path)
# if "access" in namespaces:
# info["access"] = cls._make_access_from_stat(_stat)
return Info(info)
@classmethod
def _scandir_from_resource(cls, _res, namespaces):
for _child_res in _res.iget_content():
yield cls._make_info_from_resource(_child_res, namespaces)
def _prep_path(self, _path):
if _path.startswith(self.root_path + "/"):
return _path
if _path.startswith("/"):
_path = _path[1:]
return join(self.root_path, _path)
def _reset_path(self, path, confirm=False):
if not confirm:
print(
"Are you sure you want to reset path '%s' - located at '%s' on Cloud Datastore?"
% (path, self._prep_path(path))
)
return False
with self._lock:
try:
_res = self._getresource(path)
            except Exception:
_res = data_fs._getresource(self._prep_path(path))
if not _res or not _res.isdir():
raise errors.DirectoryExpected(path)
if len(_res.listdir()) < 1:
return self.opendir(path)
if not self._is_cached:
self._stop_cache(False)
_res.delete(recursive=True)
self._stop_cache(True)
else:
_res.delete(recursive=True)
_res = data_fs.mkdir(self._prep_path(path))
return self.opendir(path)
def _stop_cache(self, confirm=False):
if confirm:
self._is_cached = False
else:
self._is_cached = True
data_fs.stop_cache(confirm)
def _getresource(self, path):
        # type: (Text) -> object
"""Get the internal resource for a path (Dir, File or None).
Arguments:
path (str): Path to a resource.
Returns:
resource: internal resource at the given path (Dir, File or None).
"""
_path = self.validatepath(path)
return data_fs._getresource(self._prep_path(_path))
def __repr__(self):
return f"{self.__class__.__name__}('{self.root_path}')"
@staticmethod
def _btopen(path, mode="r"):
"""Open the file (eg. return a BtIO object)"""
stream = data_fs.btopen(path, mode)
_mode = Mode(mode)
if _mode.truncate:
stream.seek(0)
stream.truncate()
if _mode.reading and _mode.writing:
stream = io.BufferedRandom(stream)
elif _mode.reading:
stream = io.BufferedReader(stream)
elif _mode.writing or _mode.appending:
stream = io.BufferedWriter(stream)
# if not _mode.reading:
# stream.readable = lambda: False # mock a write-only stream
# if not _mode.writing:
# stream.writable = lambda: False # mock a read-only stream
if _mode.appending:
stream.seek(0, 2) # io.SEEK_END
io_object = RawWrapper(stream, mode=mode, name=path)
return io_object
class WrapDatastoreFS(WrapFS):
def __init__(self, root_path=None):
self._temp_fs_url = "temp://__datastore_tempfs__"
# self._temp_fs_url = "mem://"
self._temp_fs = open_fs(self._temp_fs_url)
print(self._temp_fs)
# self._meta = {}
super().__init__(self._temp_fs)
@registry.install
class DatastoreOpener(Opener):
protocols = ["datastore"]
def open_fs(self, fs_url, parse_result, writeable, create, cwd):
data_fs = DatastoreFS()
return data_fs
def main():
data_fs = DatastoreFS("/")
# data_fs = WrapDatastoreFS()
# data_fs = open_fs("datastore://")
data_fs.tree()
data_fs.close()
return data_fs
if __name__ == "__main__":
result = main()
from pprint import pprint
pprint(result)
pprint(result.root_path)
pprint(result.root_res.__dict__)
|
the-stack_0_23398 | import logging
logger = logging.getLogger("milvus_benchmark.parser")
def operations_parser(operations):
""" get the type and params of test """
if not operations:
raise Exception("No operations in suite defined")
for run_type, run_params in operations.items():
logger.debug(run_type)
return run_type, run_params
return False, False
def collection_parser(collection_name):
"""
Resolve the collection name to obtain the corresponding configuration
e.g.:
sift_1m_128_l2
sift: type of data set
1m: size of the data inserted in the collection
128: vector dimension
l2: metric type
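    Example (illustrative):
        >>> collection_parser("sift_1m_128_l2")
        ('sift', 1000000, 128, 'l2')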
"""
tmp = collection_name.split("_")
# if len(tmp) != 5:
# return None
data_type = tmp[0]
collection_size_unit = tmp[1][-1]
collection_size = tmp[1][0:-1]
if collection_size_unit == "w":
collection_size = int(collection_size) * 10000
elif collection_size_unit == "m":
collection_size = int(collection_size) * 1000000
elif collection_size_unit == "b":
collection_size = int(collection_size) * 1000000000
dimension = int(tmp[2])
metric_type = str(tmp[3])
return data_type, collection_size, dimension, metric_type
def parse_ann_collection_name(collection_name):
"""
Analyze the collection name of the accuracy test and obtain the corresponding configuration
e.g.:
sift_128_euclidean
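    Example (illustrative):
        >>> parse_ann_collection_name("sift_128_euclidean")
        ('sift', 128, 'l2')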
"""
data_type = collection_name.split("_")[0]
dimension = int(collection_name.split("_")[1])
metric = collection_name.split("_")[2]
# metric = collection_name.attrs['distance']
# dimension = len(collection_name["train"][0])
metric_type = ''
if metric == "euclidean":
metric_type = "l2"
elif metric == "angular":
metric_type = "ip"
elif metric == "jaccard":
metric_type = "jaccard"
elif metric == "hamming":
metric_type = "hamming"
return data_type, dimension, metric_type
def search_params_parser(param):
""" parser params of search interface and return top_ks, nqs, nprobes"""
# parse top-k, set default value if top-k not in param
if "top_ks" not in param:
top_ks = [10]
else:
top_ks = param["top_ks"]
if isinstance(top_ks, int):
top_ks = [top_ks]
elif isinstance(top_ks, list):
top_ks = list(top_ks)
else:
logger.warning("Invalid format top-ks: %s" % str(top_ks))
# parse nqs, set default value if nq not in param
if "nqs" not in param:
nqs = [10]
else:
nqs = param["nqs"]
if isinstance(nqs, int):
nqs = [nqs]
elif isinstance(nqs, list):
nqs = list(nqs)
else:
logger.warning("Invalid format nqs: %s" % str(nqs))
# parse nprobes
if "nprobes" not in param:
nprobes = [1]
else:
nprobes = param["nprobes"]
if isinstance(nprobes, int):
nprobes = [nprobes]
elif isinstance(nprobes, list):
nprobes = list(nprobes)
else:
logger.warning("Invalid format nprobes: %s" % str(nprobes))
return top_ks, nqs, nprobes
|
the-stack_0_23399 | from app import Filter
from .bw_class import detectionScratchLineMedian, detectionScratchLineStd, defectCorrection
class BWFilter(Filter):
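    """Filter that detects and corrects scratch-line defects in a video frame.

    Illustrative usage sketch (assumes ``frame`` is a (576, 720, 3) RGB NumPy array):

        f = BWFilter()
        if f.check(frame):
            frame = f.clean(frame)
    """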
def check(self, img):
"""
        Check whether the defect corrected by this filter is present in the input image.
        img: an RGB NumPy array of shape (576, 720, 3)
"""
h_median = detectionScratchLineMedian(img)
h_std = detectionScratchLineStd(img)
        return h_median != 0 and h_std != 0
def clean(self, img):
"""
        Clean the image of the defect corrected by this filter.
        img: an RGB NumPy array of shape (576, 720, 3)
"""
h_median = detectionScratchLineMedian(img)
img_solved = defectCorrection(h_median, img)
h_std = detectionScratchLineStd(img)
img_solved_v2 = defectCorrection(h_std, img_solved)
return img_solved_v2 |
the-stack_0_23400 | import unittest
from robotide.action.actioninfo import ActionInfoCollection
from nose.tools import assert_equals
from robotide.context import IS_MAC
def _check_mac(value, expected, expected_mac):
if IS_MAC:
assert_equals(value, expected_mac)
else:
assert_equals(value, expected)
class HandlerMock(object):
def __init__(self, **handlers):
self.handlers = handlers
def __getattr__(self, name):
return self.handlers[name]
class TestActionInfoCollection(unittest.TestCase):
def test_create_entry(self):
data = """ [File]
Save | Save current suite or resource | Ctrl-S
Huba | HubaBuba
"""
handlers = HandlerMock(OnSave='save action', OnHuba='huba action')
infos = ActionInfoCollection(data, handlers)
assert_equals(infos[0].menu_name, 'File')
assert_equals(infos[0].name, 'Save')
assert_equals(infos[0].action, 'save action')
assert_equals(infos[0].shortcut.value, 'Ctrl-S')
_check_mac(infos[0].shortcut.printable, u'Ctrl-S', u'\u2303S')
assert_equals(infos[1].menu_name, 'File')
assert_equals(infos[1].name, 'Huba')
assert_equals(infos[1].action, 'huba action')
assert_equals(infos[1].shortcut.value, None)
def test_create_entry_with_multi_shortcut(self):
data = """ [Hopla]
Huba (Alt-D or CtrlCmd-H) | HubaBuba
"""
handlers = HandlerMock(OnHuba='huba action')
infos = ActionInfoCollection(data, handlers)
assert_equals(infos[0].menu_name, 'Hopla')
_check_mac(infos[0].name, u'Huba (Alt-D or Ctrl-H)', u'Huba (\u2325D or \u2318H)')
assert_equals(infos[0].action, 'huba action')
assert_equals(infos[0].shortcut.value, None)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23401 | """
Script to use to maintain ceda directory index which is back end of archive browser
Useage:
ceda_dirs.py -h | --help
ceda_dirs.py <index> -d <log_directory> --moles-catalogue-mapping <moles_mapping>
Options:
-d Directory to keep a history of the logfiles scanned
"""
__author__ = "Richard Smith"
__date__ = "25 Jan 2019"
__copyright__ = "Copyright 2018 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
__contact__ = "[email protected]"
import argparse
from ceda_elasticsearch_tools.core.log_reader import DepositLog, SpotMapping
from ceda_elasticsearch_tools.index_tools.index_updaters import CedaDirs
from ceda_elasticsearch_tools.core.utils import get_latest_log
from utils.path_tools import PathTools
from tqdm import tqdm
import os
import hashlib
import sys
from ConfigParser import ConfigParser
parser = argparse.ArgumentParser(
    description="Script to use to maintain the ceda directory index, which is the back end of the archive browser"
)
parser.add_argument("--conf", dest="conf", required=True)
#################################################
# #
# Functions #
# #
#################################################
def make_logging_dir(directory):
"""
Make directory if it doesn't already exist
:param directory: Path to create
"""
if not os.path.isdir(directory):
os.makedirs(directory)
def check_logging_dir(directory, log):
"""
Check to see if the log has been processed already.
:param directory: Logging directory to test
:param log: Log name to be processed
:return: Bool, logging path
"""
log_root = os.path.splitext(log)[0]
action_output_filename = "{}_CEDA_DIRS_REPORT.txt".format(log_root)
action_output = os.path.join(directory, action_output_filename)
if action_output_filename in os.listdir(directory):
return True, action_output
else:
return False, action_output
#################################################
# #
# End of Functions #
# #
#################################################
def main():
"""
Process the logfiles and update elasticsearch index
:return:
"""
# Get command line arguments
args = parser.parse_args()
conf = ConfigParser()
conf.read(args.conf)
# Get the latest logs
deposit_logs = get_latest_log("/badc/ARCHIVE_INFO/deposit_logs", "deposit_ingest", rank=-2)
# deposit_logs = ['deposit_ingest1.ceda.ac.uk_20180824.log']
# Check to see if logging directory exists
make_logging_dir(conf.get("files", "status-directory"))
# Initialise ceda dirs updater
cd = CedaDirs(index=conf.get("elasticsearch", "es-index"), host_url=conf.get("elasticsearch", "es-host"), **{
"http_auth": (
conf.get("elasticsearch", "es-user"),
conf.get("elasticsearch", "es-password")
)
})
# Prepare path tools
if conf.get("files", "moles-mapping"):
pt = PathTools(moles_mapping=conf.get("files", "moles-mapping"))
else:
pt = PathTools()
for log in deposit_logs:
# Read deposit logs
dl = DepositLog(log_filename=log)
# Check to see if log has already been processed
processed, logging_path = check_logging_dir(conf.get("files", "status-directory"), log)
# Skip processing if log has already been processed
if processed:
continue
#################################################
# #
# Process directory creations #
# #
#################################################
content_list = []
result_list = []
for dir in tqdm(dl.mkdir_list, desc="Processing creations", file=sys.stdout):
metadata, islink = pt.generate_path_metadata(dir)
if metadata:
content_list.append({
"id": hashlib.sha1(metadata["path"]).hexdigest(),
"document": metadata
})
result = cd.add_dirs(content_list)
result_list.append(
"New dirs: {} Operation status: {}".format(
len(dl.mkdir_list),
result
)
)
#################################################
# #
# Process directory deletions #
# #
#################################################
deletion_list = []
for dir in tqdm(dl.rmdir_list, desc="Processing deletions", file=sys.stdout):
deletion_list.append({"id": hashlib.sha1(dir).hexdigest()})
result = cd.delete_dirs(deletion_list)
result_list.append(
"Deleted dirs: {} Operation status: {}".format(
len(dl.rmdir_list),
result
)
)
#################################################
# #
# Process symlinks #
# #
#################################################
# If there are symlink actions in the deposit log. Process the directory as if
# it is a new directory.
content_list = []
for dir in tqdm(dl.symlink_list, desc="Processing symlinks", file=sys.stdout):
metadata, islink = pt.generate_path_metadata(dir)
if metadata:
content_list.append({
"id": hashlib.sha1(metadata["path"]).hexdigest(),
"document": metadata
})
result = cd.add_dirs(content_list)
result_list.append(
"Symlinked dirs: {} Operation status: {}".format(
len(dl.symlink_list),
result
)
)
#################################################
# #
# Process 00READMEs #
# #
#################################################
content_list = []
for readme in tqdm(dl.readme00_list, desc="Processing 00READMEs", file=sys.stdout):
path = os.path.dirname(readme)
content = pt.get_readme(path)
if content:
content_list.append({
"id": hashlib.sha1(path).hexdigest(),
"document": {"readme": content}
})
result = cd.update_readmes(content_list)
result_list.append(
"Added 00READMEs: {} Operation status: {}".format(
len(dl.readme00_list),
result
)
)
#################################################
# #
# Write log file #
# #
#################################################
# Write log file
with open(logging_path, 'w') as writer:
writer.writelines(map(lambda x: x + "\n", result_list))
#################################################
# #
# Process Spot Roots #
# #
#################################################
spot_log = SpotMapping()
spot_paths = spot_log.path2spotmapping.keys()
content_list = []
for spot in tqdm(spot_paths, desc="Processing spot roots", file=sys.stdout):
metadata, islink = pt.generate_path_metadata(spot)
if metadata:
content_list.append({
"id": hashlib.sha1(metadata["path"]).hexdigest(),
"document": metadata
})
result = cd.add_dirs(content_list)
pt.update_moles_mapping()
print("Spot dirs: {} Operation status: {}".format(
len(spot_log),
result
))
if __name__ == "__main__":
main()
|
the-stack_0_23402 | # -*- coding:utf-8 -*-
import os
import numpy as np
from PIL import Image
#Turn warnings into exceptions so they can be caught
import warnings
warnings.filterwarnings("error", category=UserWarning)
##Many of the images scraped from the web are faulty and raise EXIF warnings, so they need handling,
#and some images cannot be opened at all, so those need handling too
###### Remove corrupted images to avoid errors while training the model
path = '../datasets'
filelists = os.listdir(path)
input_size=300
def center_img(img, size=None, fill_value=255):
"""
center img in a square background
"""
h, w = img.shape[:2]
if size is None:
size = max(h, w)
shape = (size, size) + img.shape[2:]
background = np.full(shape, fill_value, np.uint8)
center_x = (size - w) // 2
center_y = (size - h) // 2
background[center_y:center_y + h, center_x:center_x + w] = img
return background
def preprocess_img(img):
"""
image preprocessing
you can add your special preprocess method here
"""
resize_scale = input_size / max(img.size[:2])
img = img.resize((int(img.size[0] * resize_scale), int(img.size[1] * resize_scale)))
img = img.convert('RGB')
img = np.array(img)
img = img[:, :, ::-1]
img = center_img(img,input_size)
return img
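# Illustrative note: preprocess_img(Image.open('some_image.jpg')) yields a
# 300x300x3 uint8 array (BGR channel order), padded to a square with white (255).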
for _file in filelists:
if _file.endswith('.txt'):
pass
else:
try:
img_path = os.path.join(path,_file)
img = Image.open(img_path)
img = preprocess_img(img)
except:
os.remove(img_path)
os.remove(os.path.join(path, _file.split('.')[0] + '.txt'))
print(img_path)
|
the-stack_0_23403 | #
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from dateutil.relativedelta import relativedelta
from .parse_common import is_numeric, look_for_fractions
# TODO: short_scale and ordinals don't do anything here.
# The parameters are present in the function signature for API compatibility
# reasons.
def extractnumber_sv(text, short_scale=True, ordinals=False):
"""
    Extract a number from the given text.
Args:
text (str): the string to normalize
Returns:
        (int) or (float): the value of the extracted number, or False if
        no number was found
"""
text = text.lower()
aWords = text.split()
and_pass = False
valPreAnd = False
val = False
count = 0
while count < len(aWords):
word = aWords[count]
if is_numeric(word):
val = float(word)
elif word == "första":
val = 1
elif word == "andra":
val = 2
elif word == "tredje":
val = 3
elif word == "fjärde":
val = 4
elif word == "femte":
val = 5
elif word == "sjätte":
val = 6
elif is_fractional_sv(word):
val = is_fractional_sv(word)
else:
if word == "en":
val = 1
if word == "ett":
val = 1
elif word == "två":
val = 2
elif word == "tre":
val = 3
elif word == "fyra":
val = 4
elif word == "fem":
val = 5
elif word == "sex":
val = 6
elif word == "sju":
val = 7
elif word == "åtta":
val = 8
elif word == "nio":
val = 9
elif word == "tio":
val = 10
if val:
if count < (len(aWords) - 1):
wordNext = aWords[count + 1]
else:
wordNext = ""
valNext = is_fractional_sv(wordNext)
if valNext:
val = val * valNext
aWords[count + 1] = ""
if not val:
# look for fractions like "2/3"
aPieces = word.split('/')
if look_for_fractions(aPieces):
val = float(aPieces[0]) / float(aPieces[1])
elif and_pass:
# added to value, quit here
val = valPreAnd
break
else:
count += 1
continue
aWords[count] = ""
if and_pass:
aWords[count - 1] = '' # remove "och"
val += valPreAnd
elif count + 1 < len(aWords) and aWords[count + 1] == 'och':
and_pass = True
valPreAnd = val
val = False
count += 2
continue
elif count + 2 < len(aWords) and aWords[count + 2] == 'och':
and_pass = True
valPreAnd = val
val = False
count += 3
continue
break
return val or False
def extract_datetime_sv(string, currentDate, default_time):
def clean_string(s):
"""
cleans the input string of unneeded punctuation and capitalization
among other things.
"""
s = s.lower().replace('?', '').replace('.', '').replace(',', '') \
.replace(' den ', ' ').replace(' en ', ' ')
wordList = s.split()
for idx, word in enumerate(wordList):
word = word.replace("'s", "")
ordinals = ["rd", "st", "nd", "th"]
if word[0].isdigit():
for ordinal in ordinals:
if ordinal in word:
word = word.replace(ordinal, "")
wordList[idx] = word
return wordList
def date_found():
return found or \
(
datestr != "" or timeStr != "" or
yearOffset != 0 or monthOffset != 0 or
dayOffset is True or hrOffset != 0 or
hrAbs or minOffset != 0 or
minAbs or secOffset != 0
)
if string == "" or not currentDate:
return None
found = False
daySpecified = False
dayOffset = False
monthOffset = 0
yearOffset = 0
dateNow = currentDate
today = dateNow.strftime("%w")
currentYear = dateNow.strftime("%Y")
fromFlag = False
datestr = ""
hasYear = False
timeQualifier = ""
timeQualifiersList = ['morgon', 'förmiddag', 'eftermiddag', 'kväll']
markers = ['på', 'i', 'den här', 'kring', 'efter']
days = ['måndag', 'tisdag', 'onsdag', 'torsdag',
'fredag', 'lördag', 'söndag']
months = ['januari', 'februari', 'mars', 'april', 'maj', 'juni',
'juli', 'augusti', 'september', 'oktober', 'november',
'december']
monthsShort = ['jan', 'feb', 'mar', 'apr', 'may', 'june', 'july', 'aug',
'sept', 'oct', 'nov', 'dec']
words = clean_string(string)
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
# this isn't in clean string because I don't want to save back to words
word = word.rstrip('s')
start = idx
used = 0
# save timequalifier for later
if word in timeQualifiersList:
timeQualifier = word
# parse today, tomorrow, day after tomorrow
elif word == "idag" and not fromFlag:
dayOffset = 0
used += 1
elif word == "imorgon" and not fromFlag:
dayOffset = 1
used += 1
elif word == "morgondagen" or word == "morgondagens" and not fromFlag:
dayOffset = 1
used += 1
elif word == "övermorgon" and not fromFlag:
dayOffset = 2
used += 1
# parse 5 days, 10 weeks, last week, next week
elif word == "dag" or word == "dagar":
if wordPrev[0].isdigit():
dayOffset += int(wordPrev)
start -= 1
used = 2
elif word == "vecka" or word == "veckor" and not fromFlag:
if wordPrev[0].isdigit():
dayOffset += int(wordPrev) * 7
start -= 1
used = 2
elif wordPrev == "nästa":
dayOffset = 7
start -= 1
used = 2
elif wordPrev == "förra":
dayOffset = -7
start -= 1
used = 2
# parse 10 months, next month, last month
elif word == "månad" and not fromFlag:
if wordPrev[0].isdigit():
monthOffset = int(wordPrev)
start -= 1
used = 2
elif wordPrev == "nästa":
monthOffset = 1
start -= 1
used = 2
elif wordPrev == "förra":
monthOffset = -1
start -= 1
used = 2
# parse 5 years, next year, last year
elif word == "år" and not fromFlag:
if wordPrev[0].isdigit():
yearOffset = int(wordPrev)
start -= 1
used = 2
elif wordPrev == "nästa":
yearOffset = 1
start -= 1
used = 2
elif wordPrev == "förra":
yearOffset = -1
start -= 1
used = 2
# parse Monday, Tuesday, etc., and next Monday,
# last Tuesday, etc.
elif word in days and not fromFlag:
d = days.index(word)
dayOffset = (d + 1) - int(today)
used = 1
if dayOffset < 0:
dayOffset += 7
if wordPrev == "nästa":
dayOffset += 7
used += 1
start -= 1
elif wordPrev == "förra":
dayOffset -= 7
used += 1
start -= 1
# parse 15 of July, June 20th, Feb 18, 19 of February
elif word in months or word in monthsShort and not fromFlag:
try:
m = months.index(word)
except ValueError:
m = monthsShort.index(word)
used += 1
datestr = months[m]
if wordPrev and (wordPrev[0].isdigit() or
(wordPrev == "of" and wordPrevPrev[0].isdigit())):
if wordPrev == "of" and wordPrevPrev[0].isdigit():
datestr += " " + words[idx - 2]
used += 1
start -= 1
else:
datestr += " " + wordPrev
start -= 1
used += 1
if wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
if wordNextNext and wordNextNext[0].isdigit():
datestr += " " + wordNextNext
used += 1
hasYear = True
else:
hasYear = False
# parse 5 days from tomorrow, 10 weeks from next thursday,
# 2 months from July
validFollowups = days + months + monthsShort
validFollowups.append("idag")
validFollowups.append("imorgon")
validFollowups.append("nästa")
validFollowups.append("förra")
validFollowups.append("nu")
if (word == "från" or word == "efter") and wordNext in validFollowups:
used = 2
fromFlag = True
if wordNext == "imorgon":
dayOffset += 1
elif wordNext in days:
d = days.index(wordNext)
tmpOffset = (d + 1) - int(today)
used = 2
if tmpOffset < 0:
tmpOffset += 7
dayOffset += tmpOffset
elif wordNextNext and wordNextNext in days:
d = days.index(wordNextNext)
tmpOffset = (d + 1) - int(today)
used = 3
if wordNext == "nästa":
tmpOffset += 7
used += 1
start -= 1
elif wordNext == "förra":
tmpOffset -= 7
used += 1
start -= 1
dayOffset += tmpOffset
if used > 0:
if start - 1 > 0 and words[start - 1] == "denna":
start -= 1
used += 1
for i in range(0, used):
words[i + start] = ""
if start - 1 >= 0 and words[start - 1] in markers:
words[start - 1] = ""
found = True
daySpecified = True
# parse time
timeStr = ""
hrOffset = 0
minOffset = 0
secOffset = 0
hrAbs = None
minAbs = None
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
# parse noon, midnight, morning, afternoon, evening
used = 0
if word == "middag":
hrAbs = 12
used += 1
elif word == "midnatt":
hrAbs = 0
used += 1
elif word == "morgon":
if not hrAbs:
hrAbs = 8
used += 1
elif word == "förmiddag":
if not hrAbs:
hrAbs = 10
used += 1
elif word == "eftermiddag":
if not hrAbs:
hrAbs = 15
used += 1
elif word == "kväll":
if not hrAbs:
hrAbs = 19
used += 1
# parse half an hour, quarter hour
elif wordPrev in markers or wordPrevPrev in markers:
if word == "halvtimme" or word == "halvtimma":
minOffset = 30
elif word == "kvart":
minOffset = 15
elif word == "timme" or word == "timma":
hrOffset = 1
words[idx - 1] = ""
used += 1
hrAbs = -1
minAbs = -1
# parse 5:00 am, 12:00 p.m., etc
elif word[0].isdigit():
isTime = True
strHH = ""
strMM = ""
remainder = ""
if ':' in word:
# parse colons
# "3:00 in the morning"
stage = 0
length = len(word)
for i in range(length):
if stage == 0:
if word[i].isdigit():
strHH += word[i]
elif word[i] == ":":
stage = 1
else:
stage = 2
i -= 1
elif stage == 1:
if word[i].isdigit():
strMM += word[i]
else:
stage = 2
i -= 1
elif stage == 2:
remainder = word[i:].replace(".", "")
break
if remainder == "":
nextWord = wordNext.replace(".", "")
if nextWord == "am" or nextWord == "pm":
remainder = nextWord
used += 1
elif nextWord == "tonight":
remainder = "pm"
used += 1
elif wordNext == "in" and wordNextNext == "the" and \
words[idx + 3] == "morning":
remainder = "am"
used += 3
elif wordNext == "in" and wordNextNext == "the" and \
words[idx + 3] == "afternoon":
remainder = "pm"
used += 3
elif wordNext == "in" and wordNextNext == "the" and \
words[idx + 3] == "evening":
remainder = "pm"
used += 3
elif wordNext == "in" and wordNextNext == "morning":
remainder = "am"
used += 2
elif wordNext == "in" and wordNextNext == "afternoon":
remainder = "pm"
used += 2
elif wordNext == "in" and wordNextNext == "evening":
remainder = "pm"
used += 2
elif wordNext == "this" and wordNextNext == "morning":
remainder = "am"
used = 2
elif wordNext == "this" and wordNextNext == "afternoon":
remainder = "pm"
used = 2
elif wordNext == "this" and wordNextNext == "evening":
remainder = "pm"
used = 2
elif wordNext == "at" and wordNextNext == "night":
if strHH > 5:
remainder = "pm"
else:
remainder = "am"
used += 2
else:
if timeQualifier != "":
if strHH <= 12 and \
(timeQualifier == "evening" or
timeQualifier == "afternoon"):
strHH += 12
else:
# try to parse # s without colons
# 5 hours, 10 minutes etc.
length = len(word)
strNum = ""
remainder = ""
for i in range(length):
if word[i].isdigit():
strNum += word[i]
else:
remainder += word[i]
if remainder == "":
remainder = wordNext.replace(".", "").lstrip().rstrip()
if (
remainder == "pm" or
wordNext == "pm" or
remainder == "p.m." or
wordNext == "p.m."):
strHH = strNum
remainder = "pm"
used = 1
elif (
remainder == "am" or
wordNext == "am" or
remainder == "a.m." or
wordNext == "a.m."):
strHH = strNum
remainder = "am"
used = 1
else:
if wordNext == "pm" or wordNext == "p.m.":
strHH = strNum
remainder = "pm"
used = 1
elif wordNext == "am" or wordNext == "a.m.":
strHH = strNum
remainder = "am"
used = 1
elif (
int(word) > 100 and
(
wordPrev == "o" or
wordPrev == "oh"
)):
# 0800 hours (pronounced oh-eight-hundred)
strHH = int(word) / 100
strMM = int(word) - strHH * 100
if wordNext == "hours":
used += 1
elif (
wordNext == "hours" and
word[0] != '0' and
(
int(word) < 100 and
int(word) > 2400
)):
# "in 3 hours"
hrOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "minutes":
# "in 10 minutes"
minOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "seconds":
# in 5 seconds
secOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif int(word) > 100:
strHH = int(word) / 100
strMM = int(word) - strHH * 100
if wordNext == "hours":
used += 1
elif wordNext[0].isdigit():
strHH = word
strMM = wordNext
used += 1
if wordNextNext == "hours":
used += 1
elif (
wordNext == "" or wordNext == "o'clock" or
(
wordNext == "in" and
(
wordNextNext == "the" or
wordNextNext == timeQualifier
)
)):
strHH = word
strMM = 00
if wordNext == "o'clock":
used += 1
if wordNext == "in" or wordNextNext == "in":
used += (1 if wordNext == "in" else 2)
if (wordNextNext and
wordNextNext in timeQualifier or
(words[words.index(wordNextNext) + 1] and
words[words.index(wordNextNext) + 1] in
timeQualifier)):
if (wordNextNext == "afternoon" or
(len(words) >
words.index(wordNextNext) + 1 and
words[words.index(
wordNextNext) + 1] == "afternoon")):
remainder = "pm"
if (wordNextNext == "evening" or
(len(words) >
(words.index(wordNextNext) + 1) and
words[words.index(
wordNextNext) + 1] == "evening")):
remainder = "pm"
if (wordNextNext == "morning" or
(len(words) >
words.index(wordNextNext) + 1 and
words[words.index(
wordNextNext) + 1] == "morning")):
remainder = "am"
else:
isTime = False
strHH = int(strHH) if strHH else 0
strMM = int(strMM) if strMM else 0
strHH = strHH + 12 if remainder == "pm" and strHH < 12 else strHH
strHH = strHH - 12 if remainder == "am" and strHH >= 12 else strHH
if strHH > 24 or strMM > 59:
isTime = False
used = 0
if isTime:
hrAbs = strHH * 1
minAbs = strMM * 1
used += 1
if used > 0:
# removed parsed words from the sentence
for i in range(used):
words[idx + i] = ""
if wordPrev == "o" or wordPrev == "oh":
words[words.index(wordPrev)] = ""
if wordPrev == "early":
hrOffset = -1
words[idx - 1] = ""
idx -= 1
elif wordPrev == "late":
hrOffset = 1
words[idx - 1] = ""
idx -= 1
if idx > 0 and wordPrev in markers:
words[idx - 1] = ""
if idx > 1 and wordPrevPrev in markers:
words[idx - 2] = ""
idx += used - 1
found = True
# check that we found a date
if not date_found:
return None
if dayOffset is False:
dayOffset = 0
# perform date manipulation
extractedDate = dateNow
extractedDate = extractedDate.replace(microsecond=0,
second=0,
minute=0,
hour=0)
if datestr != "":
temp = datetime.strptime(datestr, "%B %d")
if not hasYear:
temp = temp.replace(year=extractedDate.year)
if extractedDate < temp:
extractedDate = extractedDate.replace(year=int(currentYear),
month=int(
temp.strftime(
"%m")),
day=int(temp.strftime(
"%d")))
else:
extractedDate = extractedDate.replace(
year=int(currentYear) + 1,
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")))
else:
extractedDate = extractedDate.replace(
year=int(temp.strftime("%Y")),
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")))
if timeStr != "":
temp = datetime(timeStr)
extractedDate = extractedDate.replace(hour=temp.strftime("%H"),
minute=temp.strftime("%M"),
second=temp.strftime("%S"))
if yearOffset != 0:
extractedDate = extractedDate + relativedelta(years=yearOffset)
if monthOffset != 0:
extractedDate = extractedDate + relativedelta(months=monthOffset)
if dayOffset != 0:
extractedDate = extractedDate + relativedelta(days=dayOffset)
if hrAbs is None and minAbs is None and default_time:
hrAbs = default_time.hour
minAbs = default_time.minute
if hrAbs != -1 and minAbs != -1:
extractedDate = extractedDate + relativedelta(hours=hrAbs or 0,
minutes=minAbs or 0)
if (hrAbs or minAbs) and datestr == "":
if not daySpecified and dateNow > extractedDate:
extractedDate = extractedDate + relativedelta(days=1)
if hrOffset != 0:
extractedDate = extractedDate + relativedelta(hours=hrOffset)
if minOffset != 0:
extractedDate = extractedDate + relativedelta(minutes=minOffset)
if secOffset != 0:
extractedDate = extractedDate + relativedelta(seconds=secOffset)
for idx, word in enumerate(words):
if words[idx] == "and" and words[idx - 1] == "" and words[
idx + 1] == "":
words[idx] = ""
resultStr = " ".join(words)
resultStr = ' '.join(resultStr.split())
return [extractedDate, resultStr]
def is_fractional_sv(input_str):
"""
This function takes the given text and checks if it is a fraction.
Args:
input_str (str): the string to check if fractional
Returns:
(bool) or (float): False if not a fraction, otherwise the fraction
"""
if input_str.endswith('ars', -3):
input_str = input_str[:len(input_str) - 3] # e.g. "femtedelar"
if input_str.endswith('ar', -2):
input_str = input_str[:len(input_str) - 2] # e.g. "femtedelar"
if input_str.endswith('a', -1):
input_str = input_str[:len(input_str) - 1] # e.g. "halva"
if input_str.endswith('s', -1):
input_str = input_str[:len(input_str) - 1] # e.g. "halva"
aFrac = ["hel", "halv", "tredjedel", "fjärdedel", "femtedel", "sjättedel",
"sjundedel", "åttondel", "niondel", "tiondel", "elftedel",
"tolftedel"]
if input_str.lower() in aFrac:
return 1.0 / (aFrac.index(input_str) + 1)
if input_str == "kvart":
return 1.0 / 4
if input_str == "trekvart":
return 3.0 / 4
return False
def normalize_sv(text, remove_articles):
""" English string normalization """
words = text.split() # this also removed extra spaces
normalized = ''
for word in words:
# Convert numbers into digits, e.g. "two" -> "2"
if word == 'en':
word = 'ett'
textNumbers = ["noll", "ett", "två", "tre", "fyra", "fem", "sex",
"sju", "åtta", "nio", "tio", "elva", "tolv",
"tretton", "fjorton", "femton", "sexton",
"sjutton", "arton", "nitton", "tjugo"]
if word in textNumbers:
word = str(textNumbers.index(word))
normalized += " " + word
return normalized[1:] # strip the initial space
|
the-stack_0_23404 | import csv
import os
with open("Resources/budget_data.csv") as csv_file:
Months = 0
Profits = []
Dates = []
csv_reader = csv.reader(csv_budget_data.csv)
for row in open("budget_data.csv"):
num_rows += 1
print(num_rows)
csvreader = csv.reader(csv_file)
first_row = next(csvreader)
for row in csvreader:
total_months += 1
total_net_change += int(row[1])
print(net_change_list)
max_value = max(numbers)
print('Maximum value:', max_value)
mean_value = mean(numbers)
print('Mean value:', mean_value)
range_value = range("Profit")
print(range_value)
range_vaue = range("Losses")
print(range_value)
budget_csv = os.path.join('budget_data.csv')
#Open the CSV
with open(budget_csv, newline="") as csvfile:
budget_reader = csv.reader(csvfile, delimiter=",")
#Skip the header row
next(budget_reader)
#Loop through the CSV file
for row in budget_reader:
#Add date
months.append(row[0])
#Add Profit/Loss
profit_loss.append(float(row[1]))
#Calculate the total months included in the data set
total_months = (len(months))
#Calculate the net amount of Profit/Losses over the period of time
net_amount = sum(profit_loss)
#Calculate the average change per month
avg_change = net_amount / total_months
#Calculate the greatest increase in profits (date and amount)
max_profit = max(profit_loss)
#Using the index of the greatest increase to find the date
index_max = profit_loss.index(max_profit)
max_month = months[index_max]
#Calculate the greatest decrease in loss (date and amount)
min_profit = min(profit_loss)
#Using the index of the greatest decrease to find the date
index_min = profit_loss.index(min_profit)
min_month = months[index_min]
financial_analysis = (f'''Financial Analysis
----------------------------------
Total Months: {total_months}
Total: ${net_amount:.2f}
Average Change: {avg_change:.2f}
Greatest Increase in Profits: {max_month} {max_profit:.2f}
Greatest Decrease in Profits: {min_month} {min_profit:.2f}''')
#Print out analysis
print(financial_analysis)
#Create a .txt file containing the same analysis in the print out
analysis = open('financial_analysis.txt', 'w')
analysis.write(financial_analysis)
analysis.close() |
the-stack_0_23408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.plugin_utils.ise import (
ISESDK,
ise_argument_spec,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
operationType=dict(type="str"),
resourceMediaType=dict(type="str"),
))
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = False
self._supports_check_mode = False
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def get_object(self, params):
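        # Illustrative mapping sketch (placeholder values, not validated ISE inputs):
        # module params such as {"operationType": "create", "resourceMediaType": "json"}
        # are passed to the SDK as {"operation_type": "create", "resource_media_type": "json"}.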
new_object = dict(
operation_type=params.get("operationType"),
resource_media_type=params.get("resourceMediaType"),
)
return new_object
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(params=self._task.args)
response = ise.exec(
family="security_groups",
function='bulk_request_for_security_group',
params=self.get_object(self._task.args),
).response
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
|
the-stack_0_23409 | import textwrap
import time
available_clocks = [
('monotonic', time.monotonic),
('perf_counter', time.perf_counter),
('process_time', time.process_time),
('time', time.time),
]
for clock_name, func in available_clocks:
print(textwrap.dedent('''\
{name}:
adjustable : {info.adjustable}
implementation: {info.implementation}
monotonic: {info.monotonic}
resolution: {info.resolution}
current: {current}
''').format(name=clock_name, info=time.get_clock_info(clock_name), current=func()))
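# Illustrative output sketch (values vary by platform): each clock prints its
# adjustable / implementation / monotonic / resolution fields plus a current reading.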
|
the-stack_0_23410 | #standard
import sys
import time
#TCA
from TCARandom import Random_generator
from TCALoadControl import ControlFiles
from TCAAlgorithm import TCA_Algorithm
from TCAFileReader import Trajectories
# from TCAOutput import TCA_Output
from TCACore import Timer, logger, report_errors, clear_old_files, write_default, write_default_regions, control_values, strategy_values
from TCARegions import Load_Regions
from TCASpacePartitioning import Location_Tree
from TCANetData import CLNetData
import argparse
def main():
"""
TCA main procedure
"""
parser = argparse.ArgumentParser(description = 'The Trajectory Conversion Algorithm (TCA) Software is designed to test different strategies for producing, transmitting, and storing Connected \
Vehicle information.The TCA reads in and uses vehicle trajectory information, Roadside Equipment (RSE) location information, cellular region information, event region information, and strategy \
information to produce a series of snapshots that the vehicle would produce. Vehicles can be equipped to generate and transmit Probe Data Messages (PDMs), Basic Safety Messages (BSMs), ITS SPOT \
messages, or European CAM which can be transmitted by either Dedicated Short Range Communication (DSRC), cellular, or both.')
parser.add_argument('input_file', help = 'TCA input file', default = 'TCAinput.xml', nargs = '?')
parser.add_argument('--makeInput', help = 'Make input file with specified filename, strategy file(Default_Strategy.xml), Regions File(Default_Regions.xml)')
parser.add_argument('--makeStrategy', help = 'Make strategy file with specified filename')
parser.add_argument('--makeRegions', help = 'Make Regions file with specified filename')
args = parser.parse_args()
if args.makeInput or args.makeStrategy or args.makeRegions:
if args.makeInput:
write_default(args.makeInput, 'ControlFile', control_values)
write_default("Default_Strategy.xml", 'Strategy', strategy_values)
write_default_regions('Default_Regions.xml')
if args.makeStrategy:
write_default(args.makeStrategy, 'Strategy', strategy_values)
if args.makeRegions:
write_default_regions(args.makeRegions)
sys.exit()
program_st = time.time()
CF = ControlFiles(args.input_file)
CF.Load_files()
if CF.Control["FileType"] == "VISSIM":
unit_conversion = 100 / 2.54 / 12 # Converts meters to ft (for x,y coordinates)
else:
unit_conversion = 1
if CF.Control['RegionsFile'] is not None:
Regions = Load_Regions(CF.Control['RegionsFile'], CF.Control['Seed'], unit_conversion)
if CF.Control["OutputLevel"] > 0:
logger.info("Loading Regions File %s" % CF.Control['RegionsFile'])
else:
Regions = None
if CF.Control["RSELocationFile"] is not None:
RSEs = CLNetData(unit_conversion)
errors = RSEs.RSELoad(CF.Control["RSELocationFile"], CF)
report_errors(errors)
RSE_Tree = Location_Tree(RSEs.RSEList, CF.Strategy["MinRSERange"])
else:
RSEs = None
RSE_Tree = None
if CF.Control["SPOTLocationFile"] is not None:
SPOTdevices = CLNetData(unit_conversion)
errors = SPOTdevices.SPOTLoad(CF.Control["SPOTLocationFile"], CF)
report_errors(errors)
SPOT_Tree = Location_Tree(SPOTdevices.SPOTList, CF.Strategy["SPOTdeviceRange"])
else:
SPOT_Tree = None
trajectory_file = Trajectories(CF.Control['TrajectoryFileName'], CF.Control['OutputLevel'], CF.Control["FileType"])
clear_old_files(CF)
Algorithm = TCA_Algorithm(CF, RSEs, RSE_Tree, CF.Control['Seed'], Regions, SPOT_Tree)
LastTP = 0.0
for tp, vehicles in trajectory_file.read_by_tp(CF):
if CF.Control["OutputLevel"] > 0:
if tp % 1000 == 0:
logger.info("Time Step: %d" % (tp))
        # Remove vehicle data for vehicles not seen in the current timestep
Algorithm.tbl.remove_non_active_vehicles(vehicles)
vehicles_data = Algorithm.pull_veh_data(vehicles, tp)
if RSE_Tree is not None:
range_data = RSE_Tree.find_ranges(vehicles_data)
for veh_data in vehicles_data:
#if vehicle equipped
if veh_data is not None:
if veh_data['BSM_equipped']:
Algorithm.BSM.CheckBrakes(veh_data)
if CF.Control['RegionsFile'] is not None:
Regions.CheckRegions(veh_data, tp)
#if SPOT equipped
if veh_data['SPOT_equipped']:
Algorithm.SPOT.CheckMessage(veh_data, tp)
Algorithm.SPOT.CheckRange(veh_data, tp)
#if PDM equipped
if (tp % 1 ==0) and (veh_data['PDM_equipped']):
Algorithm.PDM.CheckMessage(veh_data, tp)
if veh_data['DSRC_enabled'] and CF.Control['RSELocationFile'] != None:
Algorithm.CheckDSRC(veh_data, tp, range_data)
Algorithm.CheckCellular(veh_data, tp)
if (tp % 1 ==0) and (veh_data['PDM_equipped']):
Algorithm.PDM.PSNCheck(veh_data, tp)
if veh_data['BSM_equipped']:
Algorithm.BSM.tmp_ID_check(veh_data, tp)
Algorithm.BSM.Write()
Algorithm.CAM.Write()
Algorithm.tbl.previous_values(veh_data)
LastTP = tp
if len(Algorithm.BSM.BSM_list) > 0 :
Algorithm.BSM.Write(clear_buffer=True)
if len(Algorithm.CAM.CAM_list) > 0 :
Algorithm.CAM.Write(clear_all=True)
if len(Algorithm.PDM.PDM_list) > 0 :
Algorithm.PDM.Write(clear_buffer=True, LastTP = LastTP)
if len(Algorithm.SPOT.Travelmsgs) > 0:
Algorithm.SPOT.Write(clear_all=True)
if CF.Control["OutputLevel"] > 0:
Algorithm.tbl.list_veh_counts()
if CF.Control["OutputLevel"] > 0:
ed_time = time.time()
logger.info("End time %s (%f)" % (time.strftime('%X', time.localtime(ed_time)), (ed_time - program_st) ))
logger.info("******************* End Program *******************")
del Algorithm
if __name__ == '__main__':
main() |
the-stack_0_23412 | """A regression test for automatic benchmarking garage-TensorFlow-VPG."""
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GymEnv, normalize
from garage.experiment import deterministic
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import VPG as TF_VPG
from garage.tf.policies import GaussianMLPPolicy as TF_GMP
from garage.trainer import TFTrainer
hyper_parameters = {
'hidden_sizes': [64, 64],
'center_adv': True,
'learning_rate': 1e-2,
'discount': 0.99,
'n_epochs': 250,
'batch_size': 2048,
}
@wrap_experiment
def vpg_garage_tf(ctxt, env_id, seed):
"""Create garage TensorFlow VPG model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = normalize(GymEnv(env_id))
policy = TF_GMP(
env_spec=env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TF_VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
discount=hyper_parameters['discount'],
center_adv=hyper_parameters['center_adv'],
optimizer_args=dict(
learning_rate=hyper_parameters['learning_rate'], ))
trainer.setup(algo, env)
trainer.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
|
the-stack_0_23414 | # Copyright The IETF Trust 2016-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import io
import os
import sys
import time
from pathlib import Path
from textwrap import dedent
from xym import xym
from django.conf import settings
from django.core.management.base import BaseCommand
import debug # pyflakes:ignore
class Command(BaseCommand):
"""
Populate the yang module repositories from drafts and RFCs.
Extracts yang models from RFCs (found in settings.RFC_PATH and places
them in settings.SUBMIT_YANG_RFC_MODEL_DIR, and from active drafts, placed in
settings.SUBMIT_YANG_DRAFT_MODEL_DIR.
"""
help = dedent(__doc__).strip()
def add_arguments(self, parser):
parser.add_argument('--clean',
action='store_true', dest='clean', default=False,
help='Remove the current directory content before writing new models.')
def handle(self, *filenames, **options):
"""
* All yang modules from published RFCs should be extracted and be
available in an rfc-yang repository.
* All valid yang modules from active, not replaced, internet drafts
should be extracted and be available in a draft-valid-yang repository.
* All, valid and invalid, yang modules from active, not replaced,
internet drafts should be available in a draft-all-yang repository.
(Actually, given precedence ordering, it would be enough to place
non-validating modules in a draft-invalid-yang repository instead).
* In all cases, example modules should be excluded.
* Precedence is established by the search order of the repository as
provided to pyang.
* As drafts expire, models should be removed in order to catch cases
where a module being worked on depends on one which has slipped out
of the work queue.
"""
verbosity = int(options.get('verbosity'))
def extract_from(file, dir, strict=True):
saved_stdout = sys.stdout
saved_stderr = sys.stderr
xymerr = io.StringIO()
xymout = io.StringIO()
sys.stderr = xymerr
sys.stdout = xymout
model_list = []
try:
model_list = xym.xym(str(file), str(file.parent), str(dir), strict=strict, debug_level=verbosity-2)
for name in model_list:
modfile = moddir / name
mtime = file.stat().st_mtime
os.utime(str(modfile), (mtime, mtime))
if '"' in name:
name = name.replace('"', '')
modfile.rename(str(moddir/name))
model_list = [ n.replace('"','') for n in model_list ]
except Exception as e:
self.stderr.write("** Error when extracting from %s: %s" % (file, str(e)))
finally:
sys.stdout = saved_stdout
sys.stderr = saved_stderr
#
if verbosity > 1:
outmsg = xymout.getvalue()
if outmsg.strip():
self.stdout.write(outmsg)
if verbosity>2:
errmsg = xymerr.getvalue()
if errmsg.strip():
self.stderr.write(errmsg)
return model_list
# Extract from new RFCs
rfcdir = Path(settings.RFC_PATH)
moddir = Path(settings.SUBMIT_YANG_RFC_MODEL_DIR)
if not moddir.exists():
moddir.mkdir(parents=True)
latest = 0
for item in moddir.iterdir():
if item.stat().st_mtime > latest:
latest = item.stat().st_mtime
if verbosity > 0:
self.stdout.write("Extracting to %s ..." % moddir)
for item in rfcdir.iterdir():
if item.is_file() and item.name.startswith('rfc') and item.name.endswith('.txt') and item.name[3:-4].isdigit():
if item.stat().st_mtime > latest:
model_list = extract_from(item, moddir)
for name in model_list:
if name.startswith('ietf') or name.startswith('iana'):
if verbosity > 1:
self.stdout.write(" Extracted from %s: %s" % (item, name))
elif verbosity > 0:
self.stdout.write('.', ending='')
self.stdout.flush()
else:
modfile = moddir / name
modfile.unlink()
if verbosity > 1:
self.stdout.write(" Skipped module from %s: %s" % (item, name))
if verbosity > 0:
self.stdout.write("")
# Extract valid modules from drafts
six_months_ago = time.time() - 6*31*24*60*60
def active(item):
return item.stat().st_mtime > six_months_ago
draftdir = Path(settings.INTERNET_DRAFT_PATH)
moddir = Path(settings.SUBMIT_YANG_DRAFT_MODEL_DIR)
if not moddir.exists():
moddir.mkdir(parents=True)
if verbosity > 0:
self.stdout.write("Emptying %s ..." % moddir)
for item in moddir.iterdir():
item.unlink()
if verbosity > 0:
self.stdout.write("Extracting to %s ..." % moddir)
for item in draftdir.iterdir():
try:
if item.is_file() and item.name.startswith('draft') and item.name.endswith('.txt') and active(item):
model_list = extract_from(item, moddir, strict=False)
for name in model_list:
if not name.startswith('example'):
if verbosity > 1:
self.stdout.write(" Extracted module from %s: %s" % (item, name))
elif verbosity > 0:
self.stdout.write('.', ending='')
self.stdout.flush()
else:
modfile = moddir / name
modfile.unlink()
if verbosity > 1:
self.stdout.write(" Skipped module from %s: %s" % (item, name))
except UnicodeDecodeError as e:
self.stderr.write('\nError: %s' % (e, ))
self.stderr.write(item.name)
self.stderr.write('')
if verbosity > 0:
self.stdout.write('')
|
the-stack_0_23415 | #!/usr/bin/env python
import argparse
import glob
import os
import re
import subprocess
import sys
import pandas as pd
from utilities.log_util import get_logger, log_command
BCL2FASTQ = 'bcl2fastq'
S3_RETRY = 5
S3_LOG_DIR = 's3://trace-genomics/TraceGenomics/logs'
ROOT_DIR_PATH = '/tmp'
def get_default_requirements():
return argparse.Namespace(vcpus=64, memory=256000, storage=2000,
queue='aegea_batch_demux',
ecr_image='demuxer',
ulimits=['nofile:1000000'])
def get_parser():
parser = argparse.ArgumentParser(
prog='bcl2fastq.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--exp_id',
help='Name of the prefix on S3 in --s3_input_dir that contains the run you want to process',
required=True)
parser.add_argument('--s3_input_dir',
default='s3://trace-genomics/TraceGenomics',
help='S3 path for [exp_id] folder of BCL files')
parser.add_argument('--s3_output_dir',
default='s3://trace-genomics/TraceGenomics',
help='S3 path to put fastq files')
parser.add_argument('--s3_report_dir',
default='s3://trace-genomics/TraceGenomics/reports',
help='S3 path to put the bcl2fastq report')
parser.add_argument('--s3_sample_sheet_dir',
default='s3://trace-genomics/TraceGenomics/sample-sheets',
help='S3 path to look for the sample sheet')
parser.add_argument('--group_by_sample', action='store_true',
help='Group the fastq files into folders based on sample name')
parser.add_argument('--skip_undetermined', action='store_true',
help="Don't upload the Undetermined files (can save time)")
parser.add_argument('--no_s3_download', action='store_true',
help="Do not download bcl files from S3 (useful if testing or already have locally "
"demultiplexed files in the ROOT_DIR_PATH location. Currently a work in progress.")
parser.add_argument('--no_s3_upload', action='store_true',
help="Do not upload demultiplexed fastq files to S3 (useful if testing and don't want to "
"check demultiplexing locally without writing to S3. Currently a work in progress.")
parser.add_argument('--sample_sheet_name', default=None,
help='Defaults to [exp_id].csv')
parser.add_argument('--force-glacier', action='store_true',
help='Force a transfer from Glacier storage')
# TODO(dstone): add an option to delete the original un-demultiplexed from S3 afterward
parser.add_argument('--bcl2fastq_options',
default=['--no-lane-splitting'],
nargs=argparse.REMAINDER,
help='Options to pass to bcl2fastq')
return parser
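# Example invocation (the run folder name below is an illustrative assumption,
# not a real run id):
#
#   python bcl2fastq.py --exp_id 210601_A00111_0123_AHXXXXXXX \
#       --group_by_sample --skip_undetermined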
def _check_for_run_information(sample_name):
"""
Helper function to try and find run ID information of the form RunXX_YY
"""
    m = re.match(r'^Run\d+_\d+$', sample_name)
    return m is not None
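# For example, 'Run12_3' matches the RunXX_YY pattern above and yields True,
# while 'Run12' or 'Sample_A' do not and yield False.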
def main(logger):
parser = get_parser()
args = parser.parse_args()
if os.environ.get('AWS_BATCH_JOB_ID'):
root_dir = os.path.join(ROOT_DIR_PATH, os.environ['AWS_BATCH_JOB_ID'])
else:
root_dir = ROOT_DIR_PATH
if args.sample_sheet_name is None:
args.sample_sheet_name = '{}.csv'.format(args.exp_id)
# local directories
result_path = os.path.join(root_dir, 'data', 'hca', args.exp_id)
bcl_path = os.path.join(result_path, 'bcl')
output_path = os.path.join(result_path, 'fastqs')
if not args.no_s3_download:
# only make dirs if they don't exist yet
if not os.path.isdir(result_path):
os.makedirs(result_path)
if not os.path.isdir(bcl_path):
os.mkdir(bcl_path)
# download sample sheet
command = ['aws', 's3', 'cp', '--quiet',
os.path.join(args.s3_sample_sheet_dir, args.sample_sheet_name),
result_path]
for i in range(S3_RETRY):
try:
log_command(logger, command, shell=True)
break
except subprocess.CalledProcessError:
logger.info("retrying s3 copy")
else:
raise RuntimeError("couldn't download sample sheet {}".format(
os.path.join(args.s3_sample_sheet_dir, args.sample_sheet_name))
)
# do a check on the sample inputs to make sure we can get run IDs from all of them
# change this if the Illumina sample sheet output ever changes; otherwise this line has the headers
_SAMPLE_SHEET_STARTING_LINE = 21
df_csv = pd.read_csv(os.path.join(result_path, args.sample_sheet_name), header=_SAMPLE_SHEET_STARTING_LINE)
samples_not_matching_run_ids = [sample_name for sample_name in df_csv['Sample_ID'] if not _check_for_run_information(sample_name)]
if len(samples_not_matching_run_ids) > 0:
raise ValueError('Found sample names that I could not extract run ID values (of the form RunXX_YY) from: '
'{}'.format(samples_not_matching_run_ids))
# download the bcl files
command = ['aws', 's3', 'sync', '--quiet',
'--force-glacier-transfer' if args.force_glacier else '',
os.path.join(args.s3_input_dir, args.exp_id), bcl_path]
for i in range(S3_RETRY):
try:
log_command(logger, command, shell=True)
break
except subprocess.CalledProcessError:
logger.info("retrying s3 sync bcl")
else:
raise RuntimeError("couldn't sync {}".format(
os.path.join(args.s3_input_dir, args.exp_id))
)
# this is actually awful because the process forks and you have to go kill it yourself
command = ('while true;'
' do memusage=`cat /sys/fs/cgroup/memory/memory.usage_in_bytes`;'
' memgb=`echo "${memusage}/(1000000000)" | bc -l | xargs -I {} printf "%.2f\n" {}`;'
' echo "memory usage: ${memgb}GB";'
' echo "disk usage: " `df -h | grep -e "/$" | awk \'{print $(NF-4)" "$(NF-3)" "$(NF-2)" "$(NF-1)" "$NF}\''
' sleep 90;'
' done')
p = subprocess.Popen([command], shell=True)
# Run bcl2 fastq
command = [BCL2FASTQ, ' '.join(args.bcl2fastq_options),
'--sample-sheet', os.path.join(result_path,
args.sample_sheet_name),
'-R', bcl_path, '-o', output_path]
log_command(logger, command, shell=True)
# fix directory structure of the files *before* sync!
fastqgz_files = glob.glob(os.path.join(output_path, '*fastq.gz'))
logger.debug('all fastq.gz files\n{}\n\n'.format('\n'.join(fastqgz_files)))
# TODO(dstone): organize the run based on the TraceGenomics/RunXX/RunXX_YY/*.fastq.gz and do our usual rearrangement
for fastq_file in fastqgz_files:
if (args.skip_undetermined
and os.path.basename(fastq_file).startswith('Undetermined')):
logger.info("removing {}".format(os.path.basename(fastq_file)))
os.remove(fastq_file)
elif args.group_by_sample:
# exclude the sample number (_S[numbers])
m = re.match("(.+)(_S\d+_R[12]_001.fastq.gz)",
os.path.basename(fastq_file))
if m:
sample = m.group(1) # should be of the form RunX_Y
                if not re.match(r'^Run\d+_\d+$', sample):
# shouldn't actually be able to get here, because there is a check above at the sample sheet level,
# but just in case
raise ValueError('Was expecting to find a sample name of the form RunXX_YY, could not find in {} sample name!'.format(sample))
run = sample.split('_')[0]
grouped_sample_path = os.path.join(output_path, run, sample) # organizes as RunX/RunX_Y/[sample stuff]
if not os.path.exists(grouped_sample_path):
logger.debug("creating {}".format(grouped_sample_path))
os.makedirs(grouped_sample_path)
logger.debug("moving {}".format(fastq_file))
os.rename(fastq_file, os.path.join(grouped_sample_path, os.path.basename(fastq_file)))
else:
logger.warning("Warning: regex didn't match {}".format(fastq_file))
sys.stdout.flush()
if not args.no_s3_upload:
# upload fastq files to destination folder
command = ['aws', 's3', 'sync', '--quiet', output_path,
args.s3_output_dir,
# this doesn't fit our output structure
#os.path.join(args.s3_output_dir, args.exp_id, 'rawdata'),
'--exclude', '"*"', '--include', '"*fastq.gz"']
for i in range(S3_RETRY):
try:
log_command(logger, command, shell=True)
break
except subprocess.CalledProcessError:
logger.info("retrying sync fastq")
else:
raise RuntimeError("couldn't sync fastqs")
# check fastq upload
command = ['aws', 's3', 'ls', '--recursive',
args.s3_output_dir]
#os.path.join(args.s3_output_dir, args.exp_id, 'rawdata')]
log_command(logger, command, shell=True)
# Move reports data back to S3
reports_path = subprocess.check_output(
"ls -d {}".format(os.path.join(output_path, 'Reports', 'html', '*',
'all', 'all', 'all')),
        shell=True).decode().rstrip()
command = ['aws', 's3', 'cp', '--quiet', reports_path,
os.path.join(args.s3_report_dir, args.exp_id),
'--recursive']
for i in range(S3_RETRY):
try:
log_command(logger, command, shell=True)
break
except subprocess.CalledProcessError:
logger.info("retrying cp reports")
else:
raise RuntimeError("couldn't cp reports")
p.kill()
if __name__ == "__main__":
mainlogger, log_file, file_handler = get_logger(__name__)
try:
main(mainlogger)
except:
mainlogger.info("An exception occurred", exc_info=True)
raise
finally:
if log_file:
log_cmd = 'aws s3 cp --quiet {} {}'.format(log_file, S3_LOG_DIR)
mainlogger.info(log_cmd)
file_handler.close()
subprocess.check_output(log_cmd, shell=True)
|
the-stack_0_23416 | from __future__ import annotations
import os
import platform
from contextlib import AsyncExitStack
from datetime import datetime, timezone
from inspect import isawaitable
from logging import Logger, getLogger
from typing import Callable
from uuid import UUID
import anyio
import attrs
from anyio import (
TASK_STATUS_IGNORED,
create_task_group,
get_cancelled_exc_class,
move_on_after,
)
from anyio.abc import CancelScope
from ..abc import AsyncDataStore, EventSource, Job
from ..context import current_worker, job_info
from ..converters import as_async_datastore
from ..enums import JobOutcome, RunState
from ..eventbrokers.async_local import LocalAsyncEventBroker
from ..events import JobAdded, WorkerStarted, WorkerStopped
from ..structures import JobInfo, JobResult
from ..validators import positive_integer
@attrs.define(eq=False)
class AsyncWorker:
"""Runs jobs locally in a task group."""
data_store: AsyncDataStore = attrs.field(converter=as_async_datastore)
max_concurrent_jobs: int = attrs.field(
kw_only=True, validator=positive_integer, default=100
)
identity: str = attrs.field(kw_only=True, default=None)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_wakeup_event: anyio.Event = attrs.field(init=False, factory=anyio.Event)
_acquired_jobs: set[Job] = attrs.field(init=False, factory=set)
_events: LocalAsyncEventBroker = attrs.field(
init=False, factory=LocalAsyncEventBroker
)
_running_jobs: set[UUID] = attrs.field(init=False, factory=set)
_exit_stack: AsyncExitStack = attrs.field(init=False)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
@property
def events(self) -> EventSource:
return self._events
@property
def state(self) -> RunState:
return self._state
async def __aenter__(self):
self._state = RunState.starting
self._wakeup_event = anyio.Event()
self._exit_stack = AsyncExitStack()
await self._exit_stack.__aenter__()
await self._exit_stack.enter_async_context(self._events)
# Initialize the data store and start relaying events to the worker's event broker
await self._exit_stack.enter_async_context(self.data_store)
self._exit_stack.enter_context(
self.data_store.events.subscribe(self._events.publish)
)
# Wake up the worker if the data store emits a significant job event
self._exit_stack.enter_context(
self.data_store.events.subscribe(
lambda event: self._wakeup_event.set(), {JobAdded}
)
)
# Start the actual worker
task_group = create_task_group()
await self._exit_stack.enter_async_context(task_group)
await task_group.start(self.run)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self._state = RunState.stopping
self._wakeup_event.set()
await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)
del self._wakeup_event
async def run(self, *, task_status=TASK_STATUS_IGNORED) -> None:
if self._state is not RunState.starting:
raise RuntimeError(
f"This function cannot be called while the worker is in the "
f"{self._state} state"
)
# Set the current worker
token = current_worker.set(self)
# Signal that the worker has started
self._state = RunState.started
task_status.started()
await self._events.publish(WorkerStarted())
exception: BaseException | None = None
try:
async with create_task_group() as tg:
while self._state is RunState.started:
limit = self.max_concurrent_jobs - len(self._running_jobs)
jobs = await self.data_store.acquire_jobs(self.identity, limit)
for job in jobs:
task = await self.data_store.get_task(job.task_id)
self._running_jobs.add(job.id)
tg.start_soon(self._run_job, job, task.func)
await self._wakeup_event.wait()
self._wakeup_event = anyio.Event()
except get_cancelled_exc_class():
pass
except BaseException as exc:
self.logger.exception("Worker crashed")
exception = exc
else:
self.logger.info("Worker stopped")
finally:
current_worker.reset(token)
self._state = RunState.stopped
self.logger.exception("Worker crashed")
with move_on_after(3, shield=True):
await self._events.publish(WorkerStopped(exception=exception))
async def _run_job(self, job: Job, func: Callable) -> None:
try:
# Check if the job started before the deadline
start_time = datetime.now(timezone.utc)
if job.start_deadline is not None and start_time > job.start_deadline:
result = JobResult(
job_id=job.id, outcome=JobOutcome.missed_start_deadline
)
await self.data_store.release_job(self.identity, job.task_id, result)
return
token = job_info.set(JobInfo.from_job(job))
try:
retval = func(*job.args, **job.kwargs)
if isawaitable(retval):
retval = await retval
except get_cancelled_exc_class():
with CancelScope(shield=True):
result = JobResult(job_id=job.id, outcome=JobOutcome.cancelled)
await self.data_store.release_job(
self.identity, job.task_id, result
)
except BaseException as exc:
result = JobResult(
job_id=job.id, outcome=JobOutcome.error, exception=exc
)
await self.data_store.release_job(self.identity, job.task_id, result)
if not isinstance(exc, Exception):
raise
else:
result = JobResult(
job_id=job.id, outcome=JobOutcome.success, return_value=retval
)
await self.data_store.release_job(self.identity, job.task_id, result)
finally:
job_info.reset(token)
finally:
self._running_jobs.remove(job.id)
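# A minimal usage sketch (the concrete data store instance is an assumption
# and would come from the surrounding project, e.g. an in-memory or database
# implementation of AsyncDataStore):
#
#   async def main(data_store: AsyncDataStore) -> None:
#       async with AsyncWorker(data_store) as worker:
#           # Schedulers sharing the same data store add jobs; the worker
#           # acquires up to max_concurrent_jobs of them per wakeup and
#           # publishes a JobResult for each job it releases.
#           await anyio.sleep_forever()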
|
the-stack_0_23417 | import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn
import pandas as pd
fig, ax = plt.subplots()
fig.set_tight_layout(True)
# Query the figure's on-screen size and DPI. Note that when saving the figure to
# a file, we need to provide a DPI for that separately.
print('fig size: {0} DPI, size in inches {1}'.format(
fig.get_dpi(), fig.get_size_inches()))
df1 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/plant_94.txt',sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
"age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
"stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
df2 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/field_94.txt',sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
df3 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/beta_1_94.txt',sep='\t',names=['beta'])
df4 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/RWU_94.txt',sep='\t',names=['RWU'])
time = df1.time.values
time = np.array(time)
x = time
transpiration = df1.transpiration.values
transpiration = np.array(transpiration)
y1 = transpiration
RWU = df4.RWU.values
RWU = np.array(RWU)
y2 = RWU
#beta = df3.beta.values
#beta = np.array(beta)
#y = beta
# Plot a scatter that persists (isn't redrawn) and the initial line.
#x = np.arange(0, 20, 0.1)
ax.set_xlabel('Time')
ax.set_ylabel('Evapotranspiration (m.s-1)')
ax.set_ylim(0,2.e-7)
#ax.set_ylim(y1.min(),y1.max())
ax.scatter(x, y1)
ax.scatter(x, y2)
line1, = ax.plot(x, y1, 'r-', linewidth=2, label=r'$PET_0$')
line2, = ax.plot(x, y2, 'b-', linewidth=2, label=r'$ET_0$')
ax.legend()
def update(i):
label = 'timestep {0}'.format(i)
print(label)
df1 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/plant_%s.txt'%i,sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
"age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
"stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
df2 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/field_%s.txt'%i,sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
df3 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/beta_1_%s.txt'%i,sep='\t',names=['beta'])
df4 = pd.read_csv('/home/renato/groimp_efficient/run_1c/jules/RWU_%s.txt'%i,sep='\t',names=['RWU'])
time = df1.time.values
time = np.array(time)
x = time
transpiration = df1.transpiration.values
transpiration = np.array(transpiration)
y1 = transpiration
RWU = df4.RWU.values
RWU = np.array(RWU)
y2 = RWU
# Update the line and the axes (with a new xlabel). Return a tuple of
# "artists" that have to be redrawn for this frame.
line1.set_ydata(y1)
line2.set_ydata(y2)
#ax.set_xlabel(label)
ax.set_title(label)
return line1,line2, ax
if __name__ == '__main__':
# FuncAnimation will call the 'update' function for each frame; here
# animating over 10 frames, with an interval of 200ms between frames.
anim = FuncAnimation(fig, update, frames=np.arange(1, 95), interval=500)
if len(sys.argv) > 1 and sys.argv[1] == 'save':
anim.save('assco2.gif', dpi=300, writer='imagemagick')
else:
# plt.show() will just loop the animation forever.
plt.show()
|
the-stack_0_23418 | import os
from azure.storage.blob import (
BlobServiceClient,
PublicAccess
)
class AzureStorageUtil:
BLOB_STORE_URI_TEMPLATE = "https://{}.blob.core.windows.net/"
def __init__(self, storage_account_name, credentials):
self.credentials = credentials
self.account_name = storage_account_name
self.store_uri = AzureStorageUtil.BLOB_STORE_URI_TEMPLATE.format(
self.account_name
)
self.blob_svc_client = BlobServiceClient(
account_url=self.store_uri,
credential=self.credentials
)
def download_blob(self, container:str, blob_path:str, file_path:str):
return_value = False
if os.path.exists(file_path):
os.remove(file_path)
blob_client = self.blob_svc_client.get_blob_client(
container,
blob_path
)
if blob_client.exists():
return_value = True
with open(file_path , "wb") as blob_instance:
download_stream = blob_client.download_blob()
blob_instance.write(download_stream.readall())
return return_value
def upload_process_file(self, container:str, blob_path:str, file_path:str ):
if not os.path.exists(file_path):
raise Exception("File doesn't exist")
blob_client = self.blob_svc_client.get_blob_client(
container,
blob_path
)
with open(file_path, "rb") as data:
blob_client.upload_blob(data, blob_type="BlockBlob")
print("uploaded")
def delete_process_file(self, container: str, file_name: str):
result = self._process_file_exists(container, file_name)
if result[0]:
print("Found and deleting")
result[1].delete_blob()
def _process_file_exists(self, container: str, file_name: str) -> tuple:
if not self._container_exists(container):
self._create_container(container)
return (False, None)
blob_client = self.blob_svc_client.get_blob_client(
container,
file_name
)
return (blob_client.exists(), blob_client)
def _container_exists(self, container_name) -> bool:
client = self.blob_svc_client.get_container_client(container_name)
return client.exists()
def _create_container(self, container_name):
self.blob_svc_client.create_container(container_name)
        # Containers created without a public access level are private:
        # anonymous access to the blobs is disabled (equivalent to
        # PublicAccess.OFF), so no extra ACL call is needed here.
"""
AZ_CREDS = DefaultAzureCredential()
STG_ACCOUNT_NAME = "dagstorage"
STG_ACCOUNT_CONTAINER = "dummy"
LOCAL_FILE = "exampleconf.json"
FULL_LOCAL = os.path.join(os.path.split(__file__)[0], LOCAL_FILE)
azstg = AzureStorageUtil(STG_ACCOUNT_NAME, AZ_CREDS)
azstg.delete_process_file(STG_ACCOUNT_CONTAINER, LOCAL_FILE)
azstg.upload_process_file(STG_ACCOUNT_CONTAINER, FULL_LOCAL)
"""
|
the-stack_0_23419 | # Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the nova-status CLI interfaces.
"""
# NOTE(cdent): Additional tests of nova-status may be found in
# nova/tests/functional/test_nova_status.py. Those tests use the external
# PlacementFixture, which is only available in functional tests.
import fixtures
import mock
from six.moves import StringIO
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as keystone
from keystoneauth1 import session
from oslo_upgradecheck import upgradecheck
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from requests import models
from nova.cmd import status
import nova.conf
from nova import context
# NOTE(mriedem): We only use objects as a convenience to populate the database
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures as nova_fixtures
CONF = nova.conf.CONF
class TestNovaStatusMain(test.NoDBTestCase):
"""Tests for the basic nova-status command infrastructure."""
def setUp(self):
super(TestNovaStatusMain, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
@mock.patch.object(status.config, 'parse_args')
@mock.patch.object(status, 'CONF')
def _check_main(self, mock_CONF, mock_parse_args,
category_name='check', expected_return_value=0):
mock_CONF.category.name = category_name
return_value = status.main()
self.assertEqual(expected_return_value, return_value)
mock_CONF.register_cli_opt.assert_called_once_with(
status.category_opt)
@mock.patch.object(status.version, 'version_string_with_package',
return_value="x.x.x")
def test_main_version(self, mock_version_string):
self._check_main(category_name='version')
self.assertEqual("x.x.x\n", self.output.getvalue())
@mock.patch.object(status.cmd_common, 'print_bash_completion')
def test_main_bash_completion(self, mock_print_bash):
self._check_main(category_name='bash-completion')
mock_print_bash.assert_called_once_with(status.CATEGORIES)
@mock.patch.object(status.cmd_common, 'get_action_fn')
def test_main(self, mock_get_action_fn):
mock_fn = mock.Mock()
mock_fn_args = [mock.sentinel.arg]
mock_fn_kwargs = {'key': mock.sentinel.value}
mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
mock_fn_kwargs)
self._check_main(expected_return_value=mock_fn.return_value)
mock_fn.assert_called_once_with(mock.sentinel.arg,
key=mock.sentinel.value)
@mock.patch.object(status.cmd_common, 'get_action_fn')
def test_main_error(self, mock_get_action_fn):
mock_fn = mock.Mock(side_effect=Exception('wut'))
mock_get_action_fn.return_value = (mock_fn, [], {})
self._check_main(expected_return_value=255)
output = self.output.getvalue()
self.assertIn('Error:', output)
# assert the traceback is in the output
self.assertIn('wut', output)
class TestPlacementCheck(test.NoDBTestCase):
"""Tests the nova-status placement checks.
These are done with mock as the ability to replicate all failure
domains otherwise is quite complicated. Using a devstack
environment you can validate each of these tests are matching
reality.
"""
def setUp(self):
super(TestPlacementCheck, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
self.cmd = status.UpgradeCommands()
@mock.patch.object(keystone, "load_auth_from_conf_options")
def test_no_auth(self, auth):
"""Test failure when no credentials are specified.
Replicate in devstack: start devstack with or without
placement engine, remove the auth section from the [placement]
block in nova.conf.
"""
auth.side_effect = ks_exc.MissingAuthPlugin()
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.FAILURE, res.code)
self.assertIn('No credentials specified', res.details)
@mock.patch.object(keystone, "load_auth_from_conf_options")
@mock.patch.object(session.Session, 'request')
def _test_placement_get_interface(
self, expected_interface, mock_get, mock_auth):
def fake_request(path, method, *a, **kw):
self.assertEqual(mock.sentinel.path, path)
self.assertEqual('GET', method)
self.assertIn('endpoint_filter', kw)
self.assertEqual(expected_interface,
kw['endpoint_filter']['interface'])
return mock.Mock(autospec=models.Response)
mock_get.side_effect = fake_request
self.cmd._placement_get(mock.sentinel.path)
mock_auth.assert_called_once_with(status.CONF, 'placement')
self.assertTrue(mock_get.called)
def test_placement_get_interface_default(self):
"""Tests that we try internal, then public interface by default."""
self._test_placement_get_interface(['internal', 'public'])
def test_placement_get_interface_internal(self):
"""Tests that "internal" is specified for interface when configured."""
self.flags(valid_interfaces='internal', group='placement')
self._test_placement_get_interface(['internal'])
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_invalid_auth(self, get):
"""Test failure when wrong credentials are specified or service user
doesn't exist.
Replicate in devstack: start devstack with or without
placement engine, specify random credentials in auth section
from the [placement] block in nova.conf.
"""
get.side_effect = ks_exc.Unauthorized()
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.FAILURE, res.code)
self.assertIn('Placement service credentials do not work', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_invalid_endpoint(self, get):
"""Test failure when no endpoint exists.
Replicate in devstack: start devstack without placement
engine, but create valid placement service user and specify it
in auth section of [placement] in nova.conf.
"""
get.side_effect = ks_exc.EndpointNotFound()
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.FAILURE, res.code)
self.assertIn('Placement API endpoint not found', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_discovery_failure(self, get):
"""Test failure when discovery for placement URL failed.
Replicate in devstack: start devstack with placement
engine, create valid placement service user and specify it
in auth section of [placement] in nova.conf. Stop keystone service.
"""
get.side_effect = ks_exc.DiscoveryFailure()
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.FAILURE, res.code)
self.assertIn('Discovery for placement API URI failed.', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_down_endpoint(self, get):
"""Test failure when endpoint is down.
Replicate in devstack: start devstack with placement
engine, disable placement engine apache config.
"""
get.side_effect = ks_exc.NotFound()
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.FAILURE, res.code)
self.assertIn('Placement API does not seem to be running', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_valid_version(self, get):
get.return_value = {
"versions": [
{
"min_version": "1.0",
"max_version": status.MIN_PLACEMENT_MICROVERSION,
"id": "v1.0"
}
]
}
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.SUCCESS, res.code)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_version_comparison_does_not_use_floats(self, get):
# NOTE(rpodolyaka): previously _check_placement() coerced the version
# numbers to floats prior to comparison, that would lead to failures
# in cases like float('1.10') < float('1.4'). As we require 1.4+ now,
# the _check_placement() call below will assert that version comparison
# continues to work correctly when Placement API versions 1.10
# (or newer) is released
get.return_value = {
"versions": [
{
"min_version": "1.0",
"max_version": status.MIN_PLACEMENT_MICROVERSION,
"id": "v1.0"
}
]
}
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.SUCCESS, res.code)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_invalid_version(self, get):
get.return_value = {
"versions": [
{
"min_version": "0.9",
"max_version": "0.9",
"id": "v1.0"
}
]
}
res = self.cmd._check_placement()
self.assertEqual(upgradecheck.Code.FAILURE, res.code)
self.assertIn('Placement API version %s needed, you have 0.9' %
status.MIN_PLACEMENT_MICROVERSION, res.details)
class TestUpgradeCheckCellsV2(test.NoDBTestCase):
"""Tests for the nova-status upgrade cells v2 specific check."""
# We'll setup the API DB fixture ourselves and slowly build up the
# contents until the check passes.
USES_DB_SELF = True
def setUp(self):
super(TestUpgradeCheckCellsV2, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
self.useFixture(nova_fixtures.Database(database='api'))
self.cmd = status.UpgradeCommands()
def test_check_no_cell_mappings(self):
"""The cells v2 check should fail because there are no cell mappings.
"""
result = self.cmd._check_cellsv2()
self.assertEqual(upgradecheck.Code.FAILURE, result.code)
self.assertIn('There needs to be at least two cell mappings',
result.details)
def _create_cell_mapping(self, uuid):
cm = objects.CellMapping(
context=context.get_admin_context(),
uuid=uuid,
name=uuid,
transport_url='fake://%s/' % uuid,
database_connection=uuid)
cm.create()
return cm
def test_check_no_cell0_mapping(self):
"""We'll create two cell mappings but not have cell0 mapped yet."""
for i in range(2):
uuid = getattr(uuids, str(i))
self._create_cell_mapping(uuid)
result = self.cmd._check_cellsv2()
self.assertEqual(upgradecheck.Code.FAILURE, result.code)
self.assertIn('No cell0 mapping found', result.details)
def test_check_no_host_mappings_with_computes(self):
"""Creates a cell0 and cell1 mapping but no host mappings and there are
compute nodes in the cell database.
"""
self._setup_cells()
cn = objects.ComputeNode(
context=context.get_admin_context(),
host='fake-host',
vcpus=4,
memory_mb=8 * 1024,
local_gb=40,
vcpus_used=2,
memory_mb_used=2 * 1024,
local_gb_used=10,
hypervisor_type='fake',
hypervisor_version=1,
cpu_info='{"arch": "x86_64"}')
cn.create()
result = self.cmd._check_cellsv2()
self.assertEqual(upgradecheck.Code.FAILURE, result.code)
self.assertIn('No host mappings found but there are compute nodes',
result.details)
def test_check_no_host_mappings_no_computes(self):
"""Creates the cell0 and cell1 mappings but no host mappings and no
compute nodes so it's assumed to be an initial install.
"""
self._setup_cells()
result = self.cmd._check_cellsv2()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
self.assertIn('No host mappings or compute nodes were found',
result.details)
def test_check_success(self):
"""Tests a successful cells v2 upgrade check."""
# create the cell0 and first cell mappings
self._setup_cells()
# Start a compute service and create a hostmapping for it
svc = self.start_service('compute')
cell = self.cell_mappings[test.CELL1_NAME]
hm = objects.HostMapping(context=context.get_admin_context(),
host=svc.host,
cell_mapping=cell)
hm.create()
result = self.cmd._check_cellsv2()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
self.assertIsNone(result.details)
class TestUpgradeCheckIronicFlavorMigration(test.NoDBTestCase):
"""Tests for the nova-status upgrade check on ironic flavor migration."""
# We'll setup the database ourselves because we need to use cells fixtures
# for multiple cell mappings.
USES_DB_SELF = True
# This will create three cell mappings: cell0, cell1 (default) and cell2
NUMBER_OF_CELLS = 2
def setUp(self):
super(TestUpgradeCheckIronicFlavorMigration, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
# We always need the API DB to be setup.
self.useFixture(nova_fixtures.Database(database='api'))
self.cmd = status.UpgradeCommands()
@staticmethod
def _create_node_in_cell(ctxt, cell, hypervisor_type, nodename):
with context.target_cell(ctxt, cell) as cctxt:
cn = objects.ComputeNode(
context=cctxt,
hypervisor_type=hypervisor_type,
hypervisor_hostname=nodename,
# The rest of these values are fakes.
host=uuids.host,
vcpus=4,
memory_mb=8 * 1024,
local_gb=40,
vcpus_used=2,
memory_mb_used=2 * 1024,
local_gb_used=10,
hypervisor_version=1,
cpu_info='{"arch": "x86_64"}')
cn.create()
return cn
@staticmethod
def _create_instance_in_cell(ctxt, cell, node, is_deleted=False,
flavor_migrated=False):
with context.target_cell(ctxt, cell) as cctxt:
inst = objects.Instance(
context=cctxt,
host=node.host,
node=node.hypervisor_hostname,
uuid=uuidutils.generate_uuid())
inst.create()
if is_deleted:
inst.destroy()
else:
# Create an embedded flavor for the instance. We don't create
# this because we're in a cell context and flavors are global,
# but we don't actually care about global flavors in this
# check.
extra_specs = {}
if flavor_migrated:
extra_specs['resources:CUSTOM_BAREMETAL_GOLD'] = '1'
inst.flavor = objects.Flavor(cctxt, extra_specs=extra_specs)
inst.old_flavor = None
inst.new_flavor = None
inst.save()
return inst
def test_fresh_install_no_cell_mappings(self):
"""Tests the scenario where we don't have any cell mappings (no cells
v2 setup yet) so we don't know what state we're in and we return a
warning.
"""
result = self.cmd._check_ironic_flavor_migration()
self.assertEqual(upgradecheck.Code.WARNING, result.code)
self.assertIn('Unable to determine ironic flavor migration without '
'cell mappings', result.details)
def test_fresh_install_no_computes(self):
"""Tests a fresh install scenario where we have two non-cell0 cells
but no compute nodes in either cell yet, so there is nothing to do
and we return success.
"""
self._setup_cells()
result = self.cmd._check_ironic_flavor_migration()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_mixed_computes_deleted_ironic_instance(self):
"""Tests the scenario where we have a kvm compute node in one cell
and an ironic compute node in another cell. The kvm compute node does
not have any instances. The ironic compute node has an instance with
the same hypervisor_hostname match but the instance is (soft) deleted
so it's ignored.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create the ironic compute node in cell1
ironic_node = self._create_node_in_cell(
ctxt, self.cell_mappings['cell1'], 'ironic', uuids.node_uuid)
# Create the kvm compute node in cell2
self._create_node_in_cell(
ctxt, self.cell_mappings['cell2'], 'kvm', 'fake-kvm-host')
# Now create an instance in cell1 which is on the ironic node but is
# soft deleted (instance.deleted == instance.id).
self._create_instance_in_cell(
ctxt, self.cell_mappings['cell1'], ironic_node, is_deleted=True)
result = self.cmd._check_ironic_flavor_migration()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_unmigrated_ironic_instances(self):
"""Tests a scenario where we have two cells with only ironic compute
nodes. The first cell has one migrated and one unmigrated instance.
The second cell has two unmigrated instances. The result is the check
returns failure.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create the ironic compute nodes in cell1
for x in range(2):
cell = self.cell_mappings['cell1']
ironic_node = self._create_node_in_cell(
ctxt, cell, 'ironic', getattr(uuids, 'cell1-node-%d' % x))
# Create an instance for this node. In cell1, we have one
# migrated and one unmigrated instance.
flavor_migrated = True if x % 2 else False
self._create_instance_in_cell(
ctxt, cell, ironic_node, flavor_migrated=flavor_migrated)
# Create the ironic compute nodes in cell2
for x in range(2):
cell = self.cell_mappings['cell2']
ironic_node = self._create_node_in_cell(
ctxt, cell, 'ironic', getattr(uuids, 'cell2-node-%d' % x))
# Create an instance for this node. In cell2, all instances are
# unmigrated.
self._create_instance_in_cell(
ctxt, cell, ironic_node, flavor_migrated=False)
result = self.cmd._check_ironic_flavor_migration()
self.assertEqual(upgradecheck.Code.FAILURE, result.code)
# Check the message - it should point out cell1 has one unmigrated
# instance and cell2 has two unmigrated instances.
unmigrated_instance_count_by_cell = {
self.cell_mappings['cell1'].uuid: 1,
self.cell_mappings['cell2'].uuid: 2
}
self.assertIn(
'There are (cell=x) number of unmigrated instances in each '
'cell: %s.' % ' '.join('(%s=%s)' % (
cell_id, unmigrated_instance_count_by_cell[cell_id])
for cell_id in
sorted(unmigrated_instance_count_by_cell.keys())),
result.details)
def _create_minimal_request_spec(ctxt, instance):
request_spec = objects.RequestSpec.from_components(
ctxt, instance.uuid, instance.image_meta,
instance.flavor, instance.numa_topology,
instance.pci_requests,
{}, None, instance.availability_zone,
project_id=instance.project_id,
user_id=instance.user_id
)
scheduler_utils.setup_instance_group(ctxt, request_spec)
request_spec.create()
class TestUpgradeCheckRequestSpecMigration(test.NoDBTestCase):
"""Tests for the nova-status upgrade check for request spec migration."""
# We'll setup the database ourselves because we need to use cells fixtures
# for multiple cell mappings.
USES_DB_SELF = True
# This will create three cell mappings: cell0, cell1 (default) and cell2
NUMBER_OF_CELLS = 2
def setUp(self):
super(TestUpgradeCheckRequestSpecMigration, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
# We always need the API DB to be setup.
self.useFixture(nova_fixtures.Database(database='api'))
self.cmd = status.UpgradeCommands()
@staticmethod
def _create_instance_in_cell(ctxt, cell, is_deleted=False,
create_request_spec=False):
with context.target_cell(ctxt, cell) as cctxt:
inst = objects.Instance(
context=cctxt,
uuid=uuidutils.generate_uuid())
inst.create()
if is_deleted:
inst.destroy()
if create_request_spec:
# Fake out some fields in the Instance so we don't lazy-load them.
inst.flavor = objects.Flavor()
inst.numa_topology = None
inst.system_metadata = {}
inst.pci_requests = None
inst.project_id = 'fake-project'
inst.user_id = 'fake-user'
_create_minimal_request_spec(ctxt, inst)
return inst
def test_fresh_install_no_cell_mappings(self):
"""Tests the scenario where we don't have any cell mappings (no cells
v2 setup yet) so we don't know what state we're in and we return a
warning.
"""
result = self.cmd._check_request_spec_migration()
self.assertEqual(upgradecheck.Code.WARNING, result.code)
self.assertIn('Unable to determine request spec migrations without '
'cell mappings.', result.details)
def test_deleted_instance_one_cell_migrated_other_success(self):
"""Tests the scenario that we have two cells, one has only a single
deleted instance in it and the other has a single already-migrated
instance in it, so the overall result is success.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create a deleted instance in cell1.
self._create_instance_in_cell(
ctxt, self.cell_mappings['cell1'], is_deleted=True)
# Create a migrated instance in cell2.
self._create_instance_in_cell(
ctxt, self.cell_mappings['cell2'], create_request_spec=True)
result = self.cmd._check_request_spec_migration()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_unmigrated_request_spec_instances(self):
"""Tests the scenario that we have a migrated instance in cell1 and
an unmigrated instance in cell2 so the check fails.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create a migrated instance in cell1.
self._create_instance_in_cell(
ctxt, self.cell_mappings['cell1'], create_request_spec=True)
# Create an unmigrated instance in cell2.
self._create_instance_in_cell(ctxt, self.cell_mappings['cell2'])
result = self.cmd._check_request_spec_migration()
self.assertEqual(upgradecheck.Code.FAILURE, result.code)
self.assertIn("The following cells have instances which do not have "
"matching request_specs in the API database: %s Run "
"'nova-manage db online_data_migrations' on each cell "
"to create the missing request specs." %
self.cell_mappings['cell2'].uuid, result.details)
class TestUpgradeCheckConsoles(test.NoDBTestCase):
"""Tests for the nova-status upgrade check for consoles."""
# We'll setup the database ourselves because we need to use cells fixtures
# for multiple cell mappings.
USES_DB_SELF = True
# This will create three cell mappings: cell0, cell1 (default) and cell2
NUMBER_OF_CELLS = 2
def setUp(self):
super(TestUpgradeCheckConsoles, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
# We always need the API DB to be setup.
self.useFixture(nova_fixtures.Database(database='api'))
self.cmd = status.UpgradeCommands()
@staticmethod
def _create_service_in_cell(ctxt, cell, binary, is_deleted=False,
disabled=False, version=None,
create_token_auth=False):
with context.target_cell(ctxt, cell) as cctxt:
service = objects.Service(context=cctxt, binary=binary,
disabled=disabled, host='dontcare')
if version:
service.version = version
service.create()
if is_deleted:
service.destroy()
if create_token_auth:
# We have to create an instance in order to create a token
# auth.
inst = objects.Instance(context=cctxt,
uuid=uuidutils.generate_uuid())
inst.create()
auth = objects.ConsoleAuthToken(context=cctxt,
console_type='novnc',
host='hostname', port=6080,
instance_uuid=inst.uuid)
auth.authorize(CONF.consoleauth.token_ttl)
return service
def test_check_cells_v1_enabled(self):
"""This is a 'success' case since the console auths check is
ignored when running cells v1.
"""
self.flags(enable=True, group='cells')
result = self.cmd._check_console_auths()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_check_workaround_enabled(self):
"""This is a 'success' case since the console auths check is
ignored when the workaround is already enabled.
"""
self.flags(enable_consoleauth=True, group='workarounds')
result = self.cmd._check_console_auths()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_deleted_disabled_consoleauth(self):
"""Tests that services other than nova-consoleauth and deleted/disabled
nova-consoleauth services are filtered out.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create a compute service in cell1.
self._create_service_in_cell(ctxt, self.cell_mappings['cell1'],
'nova-compute')
# Create a deleted consoleauth service in cell1.
self._create_service_in_cell(ctxt, self.cell_mappings['cell1'],
'nova-consoleauth', is_deleted=True)
# Create a compute service in cell2.
self._create_service_in_cell(ctxt, self.cell_mappings['cell2'],
'nova-compute')
# Create a disabled consoleauth service in cell2.
self._create_service_in_cell(ctxt, self.cell_mappings['cell2'],
'nova-consoleauth', disabled=True)
result = self.cmd._check_console_auths()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_consoleauth_with_upgrade_not_started(self):
"""Tests the scenario where the deployment is using consoles but has no
compute services >= Rocky, i.e. a not started upgrade.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create a deleted consoleauth service in cell1.
self._create_service_in_cell(ctxt, self.cell_mappings['cell1'],
'nova-consoleauth', is_deleted=True)
# Create a live consoleauth service in cell0. (Asserts we check cell0).
self._create_service_in_cell(ctxt, self.cell_mappings['cell0'],
'nova-consoleauth')
# Create Queens compute services in the cells.
for cell in ['cell1', 'cell2']:
self._create_service_in_cell(ctxt, self.cell_mappings[cell],
'nova-compute', version=30)
result = self.cmd._check_console_auths()
self.assertEqual(upgradecheck.Code.WARNING, result.code)
def test_consoleauth_with_upgrade_complete(self):
"""Tests the scenario where the deployment is using consoles and has
all compute services >= Rocky in every cell database, i.e. a completed
upgrade.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create a live consoleauth service in cell1 with token auth.
self._create_service_in_cell(ctxt, self.cell_mappings['cell1'],
'nova-consoleauth',
create_token_auth=True)
# Create a live consoleauth service in cell2 with token auth.
self._create_service_in_cell(ctxt, self.cell_mappings['cell2'],
'nova-consoleauth',
create_token_auth=True)
# Create Rocky compute services in the cells.
for cell in ['cell1', 'cell2']:
self._create_service_in_cell(ctxt, self.cell_mappings[cell],
'nova-compute', version=35)
        # Create a Queens compute service in cell0. This is not actually valid,
# we do it to assert that we skip cell0 when checking service versions.
self._create_service_in_cell(ctxt, self.cell_mappings['cell0'],
'nova-compute', version=30)
result = self.cmd._check_console_auths()
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
def test_consoleauth_with_upgrade_partial(self):
"""Tests the scenario where the deployment is using consoles and has
compute services >= Rocky in at least one, but not all, cell databases,
i.e. a partial upgrade.
"""
self._setup_cells()
ctxt = context.get_admin_context()
# Create a live consoleauth service in cell1.
self._create_service_in_cell(ctxt, self.cell_mappings['cell1'],
'nova-consoleauth')
# Create a live consoleauth service in cell2 with token auth.
self._create_service_in_cell(ctxt, self.cell_mappings['cell2'],
'nova-consoleauth',
create_token_auth=True)
# Create a Queens compute service in cell1.
self._create_service_in_cell(ctxt, self.cell_mappings['cell1'],
'nova-compute', version=30)
# Create a Rocky compute service in cell2.
self._create_service_in_cell(ctxt, self.cell_mappings['cell2'],
'nova-compute', version=35)
result = self.cmd._check_console_auths()
self.assertEqual(upgradecheck.Code.WARNING, result.code)
self.assertIn("One or more cells were found which have nova-compute "
"services older than Rocky. "
"Please set the '[workarounds]enable_consoleauth' "
"configuration option to 'True' on your console proxy "
"host if you are performing a rolling upgrade to enable "
"consoles to function during a partial upgrade.",
result.details)
|
the-stack_0_23420 | from os import listdir, walk
files = None
folders = None
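# Grab the immediate files and sub-folders of the bot/ package, import every
# command module inside each sub-package (skipping lib, __pycache__, cache and
# ban.py), and expose the top-level modules through __all__, appending "ban"
# last so it is loaded after the other commands.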
for _, dirnames, filenames in walk("bot", topdown=True):
    files = filenames
    folders = dirnames
    break
__all__ = []
for folder in folders:
if folder != "lib" and folder != "__pycache__" and folder != "cache":
for file in listdir(f"bot/{folder}"):
if file != "__pycache__" and file != "ban.py" and file.endswith(".py"):
__import__(f"bot.{folder}.{file[:-3]}")
for file in files:
if file.endswith(".py") \
and not file.startswith("__") \
and file != "ban.py":
__all__.append(file[:-3])
__all__.append("ban")
|
the-stack_0_23421 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streaming aware inverse_stft layer."""
import functools
from kws_streaming.layers import modes
from kws_streaming.layers.compat import tf
class InverseSTFT(tf.keras.layers.Layer):
"""Streaming aware InverseSTFT layer.
Computes inverse_stft in streaming or non-streaming mode.
Attributes:
frame_size: Sliding window/frame size in samples.
frame_step: Number of samples to jump between frames. Also called hop size
window_type: None or hann_tf are supported.
inverse_stft_window_fn: If True window_fn=tf.signal.inverse_stft_window_fn
else window_fn=synthesis_window_fn which is defined by window_type.
fft_size: If None then closed to frame_size power of 2 will be used.
mode: Inference or training mode.
use_one_step: If True, model will run one sample per one inference step;
if False, model will run multiple per one inference step. It is useful
for strided streaming.
input_frames: Number of the input frames in streaming mode, it will be
estimated automatically in build method.
state_name_tag: Tag appended to the state's name.
**kwargs: Additional layer arguments.
"""
def __init__(self,
frame_size,
frame_step,
inverse_stft_window_fn=True,
window_type='hann_tf',
fft_size=None,
inference_batch_size=1,
mode=modes.Modes.TRAINING,
use_one_step=False,
input_frames=None,
state_name_tag='ExternalState',
**kwargs):
super(InverseSTFT, self).__init__(**kwargs)
self.frame_size = frame_size
self.frame_step = frame_step
self.window_type = window_type
self.inverse_stft_window_fn = inverse_stft_window_fn
self.fft_size = fft_size
self.inference_batch_size = inference_batch_size
self.mode = mode
self.use_one_step = use_one_step
self.state_name_tag = state_name_tag
if self.window_type not in [None, 'hann_tf']:
      raise ValueError('Unsupported window_type', self.window_type)
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
      # Create a state variable for inference streaming with internal state
self.states = self.add_weight(
name=self.name + 'frame_states',
shape=[self.inference_batch_size, self.frame_size],
trainable=False,
initializer=tf.zeros_initializer,
dtype=tf.float32)
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
      # In streaming mode with external state,
      # the state becomes input/output placeholders.
self.input_state = tf.keras.layers.Input(
shape=(self.frame_size,),
batch_size=self.inference_batch_size,
name=self.name + 'frame_states',
dtype=tf.float32)
self.output_state = None
self.window_fn = None
self.synthesis_window_fn = None
if self.window_type == 'hann_tf':
self.synthesis_window_fn = functools.partial(
tf.signal.hann_window, periodic=True)
if self.inverse_stft_window_fn:
self.window_fn = tf.signal.inverse_stft_window_fn(
self.frame_step, forward_window_fn=self.synthesis_window_fn)
else:
self.window_fn = self.synthesis_window_fn
else:
self.window_fn = None
def build(self, input_shape):
super(InverseSTFT, self).build(input_shape)
self.input_frames = input_shape.as_list()[1]
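  # Mode dispatch (a minimal sketch reconstructed from the streaming and
  # non-streaming helpers defined in this class): route the layer call to the
  # implementation that matches the configured mode.
  def call(self, inputs):
    if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
      return self._streaming_internal_state(inputs)
    elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
      # The updated state is exposed via get_output_state() and has to be fed
      # back by the caller on the next inference step.
      output, self.output_state = self._streaming_external_state(
          inputs, self.input_state)
      return output
    else:
      # Training and non-streaming inference use the plain inverse_stft path.
      return self._non_streaming(inputs)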
def get_config(self):
config = super(InverseSTFT, self).get_config()
config.update({
'frame_size': self.frame_size,
'frame_step': self.frame_step,
'window_type': self.window_type,
'inverse_stft_window_fn': self.inverse_stft_window_fn,
'fft_size': self.fft_size,
'inference_batch_size': self.inference_batch_size,
'mode': self.mode,
'use_one_step': self.use_one_step,
'state_name_tag': self.state_name_tag,
'input_frames': self.input_frames,
})
return config
def get_input_state(self):
# input state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
return [self.input_state]
else:
raise ValueError('Expected the layer to be in external streaming mode, '
f'not `{self.mode}`.')
def get_output_state(self):
# output state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
return [self.output_state]
else:
raise ValueError('Expected the layer to be in external streaming mode, '
f'not `{self.mode}`.')
def _streaming_internal_state(self, inputs):
inversed_frames, new_states = self._streaming_external_state(
inputs, self.states)
assign_states = self.states.assign(new_states)
with tf.control_dependencies([assign_states]):
# use tf.identity to ensure that assign_states is executed
return tf.identity(inversed_frames)
def _streaming_external_state(self, inputs, state):
state = [] if state is None else state
# compute inversed FT of any number of input frames
inversed_frame = tf.signal.inverse_stft(
inputs,
self.frame_size,
self.frame_step,
self.fft_size,
window_fn=self.window_fn)
inversed_frame = tf.cast(inversed_frame, tf.float32)
    # If there is no overlap between frames, then there is no need for
    # streaming state processing.
if self.frame_size - self.frame_step <= 0:
return inversed_frame, state
if self.use_one_step: # streaming with input frame by frame
# update frame state
new_frame_state = state + inversed_frame[:, 0:self.frame_size]
# get output hop before frame shifting
inversed_frames = new_frame_state[:, 0:self.frame_step]
# shift frame samples by frame_step to the left: ring buffer
new_frame_state = tf.concat(
[new_frame_state, tf.zeros([1, self.frame_step])], axis=1)
new_frame_state = new_frame_state[:, -self.frame_size:]
else: # streaming with several input frames
previous_state = state + inversed_frame[:, 0:self.frame_size]
new_frame_state = tf.concat(
[previous_state, inversed_frame[:, self.frame_size:]], axis=1)
# get output hops before frame shifting
inversed_frames = new_frame_state[:,
0:self.frame_step * self.input_frames]
# shift frame samples by frame_step to the left: ring buffer
new_frame_state = tf.concat(
[new_frame_state, tf.zeros([1, self.frame_step])], axis=1)
new_frame_state = new_frame_state[:, -self.frame_size:]
return inversed_frames, new_frame_state
def _non_streaming(self, inputs):
    # Note that if a non-rectangular window_fn is used, the first and last
    # reconstructed frames will be numerically different from the original
    # audio frames.
output = tf.signal.inverse_stft(
inputs,
self.frame_size,
self.frame_step,
self.fft_size,
window_fn=self.window_fn)
return tf.cast(output, tf.float32)
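# A minimal non-streaming usage sketch (the waveform length, frame size and
# frame step below are illustrative assumptions):
#
#   frame_size, frame_step = 400, 160
#   audio = tf.random.uniform([1, 16000])
#   stft = tf.signal.stft(audio, frame_length=frame_size, frame_step=frame_step)
#   istft_layer = InverseSTFT(frame_size, frame_step)
#   reconstructed = istft_layer(stft)  # approximately restores the waveform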
|
the-stack_0_23423 | #-*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys,os
sys.path.append(os.path.dirname(__file__))
from titanic_preprocess import get_binned_data
def plot_train_test_histogram(col_name, titanic_all, bins=10):
'''
    Plot histograms of the training data and the test data for one column
    Parameters
    ----------
    col_name : str
        Name of the column to plot
    titanic_all : pd.DataFrame
        Combined train/test data
    bins : int
        Number of histogram bins
'''
    # Bin the values
all_values = titanic_all[col_name]
all_binned_values = get_binned_data(all_values, bins)
train_flg = titanic_all['Type'] == 'train'
train_binned_values = all_binned_values[train_flg]
test_flg = titanic_all['Type'] == 'test'
test_binned_values = all_binned_values[test_flg]
    # Count records per category
train_plot_data = pd.DataFrame({'train': train_binned_values.value_counts() / sum(train_flg)})
test_plot_data = pd.DataFrame({'test': test_binned_values.value_counts() / sum(test_flg)})
all_plot_data = pd.DataFrame({'all': all_binned_values.value_counts()})
if all_values.dtype == np.int64:
train_plot_data.index = train_plot_data.index.astype(int)
test_plot_data.index = test_plot_data.index.astype(int)
all_plot_data.index = all_plot_data.index.astype(int)
train_plot_data = train_plot_data.sort_index()
test_plot_data = test_plot_data.sort_index()
all_plot_data = all_plot_data.sort_index()
    # Align the train/test counts to the index of all categories
train_plot_data = pd.concat([all_plot_data, train_plot_data], axis=1, sort=True).fillna(0)['train']
test_plot_data = pd.concat([all_plot_data, test_plot_data], axis=1, sort=True).fillna(0)['test']
x = np.arange(len(all_plot_data))
w = 0.4
plt.bar(x, train_plot_data, width=w, label='train', color='blue')
plt.bar(x+w, test_plot_data, width=w, label='test', color='red')
plt.xticks(x+w/2, all_plot_data.index, rotation=90)
plt.legend(loc='best')
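# Example call (assumes `titanic_all` is the combined train/test frame with a
# 'Type' column marking rows as 'train' or 'test', as required above):
#
#   plot_train_test_histogram('Age', titanic_all, bins=8)
#   plt.show()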
def plot_survival_rate(col_name, titanic_all, target='Survived', bins=10, label_format='{:02}_{:.0f}-{:.0f}'):
'''
    Plot the survival rate for each value of a feature
    Parameters
    ----------
    col_name : str
        Name of the column to plot
    titanic_all : pd.DataFrame
        Combined train/test data
    target : str
        Name of the target column
    bins : int
        Number of histogram bins; ignored when the values are of str type
'''
    # Bin the values
all_values = titanic_all[col_name]
all_binned_values = get_binned_data(all_values, bins=bins, label_format=label_format)
train_flg = titanic_all['Type'] == 'train'
train_binned_values = all_binned_values[train_flg]
    # Aggregate per category
feature_df = pd.DataFrame({col_name : train_binned_values, target : titanic_all[target]})
survival_rate_df = feature_df.groupby(col_name).mean()
count_df = feature_df.groupby(col_name).count()
count_df.columns = ['count']
category_survival_df = survival_rate_df.join(count_df)
if all_values.dtype == np.int64:
category_survival_df.index = category_survival_df.index.astype(int)
category_survival_df = category_survival_df.sort_index()
category_survival_df.index = category_survival_df.index.astype(str)
    # Plot the histogram and the survival rate
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.bar(category_survival_df.index, category_survival_df['count'], alpha=0.5)
ax1.set_ylabel('count')
ax1.set_xticklabels(category_survival_df.index, rotation=90)
ax2 = ax1.twinx()
ax2.plot(category_survival_df.index, category_survival_df[target], color='red', label=target)
ax2.set_ylabel('{} rate'.format(target))
ax2.set_ylim([0, 1.2])
ax2.legend(loc='best')
ax1.set_title('{target} rate by {col}'.format(target=target, col=col_name))
ax1.set_xlabel(col_name)
print(category_survival_df.to_string(formatters={target: '{:.1%}'.format}))
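# Example call (same combined DataFrame; the column and bin count are
# illustrative):
#
#   plot_survival_rate('Pclass', titanic_all, target='Survived', bins=3)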
def plot_model_coefficient(model, feature_names):
'''
    Plot the model coefficients ordered by absolute magnitude
    Parameters
    ----------------
    model : fitted model object
    feature_names : list
        List of feature names
'''
coef_df = pd.DataFrame(model.coef_.T)
feature_df = pd.DataFrame(feature_names)
model_df = pd.concat([feature_df, coef_df, abs(coef_df)], axis=1)
model_df.columns = ['feature_name', 'coef', 'coef_abs']
model_df = model_df.sort_values(by='coef_abs')
plt.xticks(rotation=90)
plt.bar(model_df.feature_name, model_df.coef)
def get_plot_data(feature, X, y_pred, y_train, bins=10):
    '''
    Build the dataset used to plot a feature against the survival rate (predicted vs. actual)
    Parameters
    ----------
    feature : str
        Name of the feature column
    X : pd.DataFrame
        Feature dataset
    y_pred : list
        Predicted values
    y_train : list
        Actual values
    bins : int
        Number of bins (default: 10)
    '''
x_val = X[feature]
x = get_binned_data(x_val, bins)
feature_df = pd.DataFrame({feature : x, 'Survived(fact)' : y_train, 'Survived(pred)' : y_pred})
survival_rate_df = feature_df.groupby(feature).mean()
count_df = feature_df.groupby(feature).count()[['Survived(fact)']]
count_df.columns = ['count']
plot_df = survival_rate_df.join(count_df)
return plot_df
def plot_feature_result(axe, plot_df, loc='upper right'):
    '''
    Plot the predicted vs. actual trend for a single feature
    Parameters
    ----------
    axe : subplot
        Subplot (Axes) object to draw on
    plot_df : pd.DataFrame
        Dataset prepared for plotting
    loc : str
        Legend location
    '''
x_axis = np.array(plot_df.index)
feature = plot_df.index.name
axe.bar(x_axis, plot_df['count'], alpha=0.5)
axe.set_ylabel('count')
axe.set_xticklabels(x_axis, rotation=90)
ax2 = axe.twinx()
ax2.plot(x_axis, plot_df['Survived(fact)'], color='red', label='Survival(fact)')
ax2.plot(x_axis, plot_df['Survived(pred)'], color='blue', label='Survival(pred)')
ax2.set_ylabel('Survival rate')
ax2.legend(loc=loc)
axe.set_title('Survival rate by {feature}'.format(feature=feature))
axe.set_xlabel(feature)
def plot_single_regression_result(model, feature, X_train, X_orig, y_train):
    '''
    Plot the model's predicted vs. actual trend
    Parameters
    ----------
    model :
        Fitted model object
    feature : str
        Feature name
    X_train : pd.DataFrame
        Training data (after normalization and other preprocessing)
    X_orig : pd.DataFrame
        Training data (original values)
    y_train : list
        Actual values
    '''
y_pred = [p[1] for p in model.predict_proba(X_train)]
plot_df = get_plot_data(feature, X_orig, y_pred, y_train)
fig, axe = plt.subplots(1, 1)
plot_feature_result(axe, plot_df)
def get_grid_search_result(model_tuning):
    '''
    Build a DataFrame summarizing the tuning results
    Parameters
    ----------
    model_tuning :
        Result of tuning with GridSearchCV
    '''
    # Collect parameters and scores
score_df = pd.DataFrame()
for i, test_score in enumerate(model_tuning.cv_results_['mean_test_score']):
param = model_tuning.cv_results_['params'][i]
param_df = pd.DataFrame(param.values(), index=param.keys()).T
        # Convert Negative Log Loss back to Log Loss
if model_tuning.scoring == 'neg_log_loss':
test_score *= -1
param_df['score'] = test_score
score_df = pd.concat([score_df, param_df], axis=0)
score_df.reset_index(drop=True, inplace=True)
return score_df
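# Illustrative sketch of how the summary DataFrame is produced (names and parameter
# grids below are placeholders, not from the original notebook):
#
#   from sklearn.ensemble import RandomForestClassifier
#   from sklearn.model_selection import GridSearchCV
#   model_tuning = GridSearchCV(RandomForestClassifier(),
#                               {'n_estimators': [50, 100], 'max_depth': [3, 5]},
#                               scoring='neg_log_loss', cv=5).fit(X, y)
#   score_df = get_grid_search_result(model_tuning)   # one row per parameter combination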
def plot_rf_tuning_result(model_tuning, x_param_name):
    '''
    Plot the RandomForest tuning results
    Parameters
    ----------
    model_tuning :
        Result of tuning with GridSearchCV
    x_param_name : str
        Name of the parameter shown on the x-axis
    '''
score_df = get_grid_search_result(model_tuning)
    # Parameters other than the one used for the x-axis
line_param_name = score_df.columns.to_list()
line_param_name.remove(x_param_name)
line_param_name.remove('score')
    # Legend for each line: "param_name=param_value"
line_name_list = []
for i, item in score_df.iterrows():
line_name = ''
for param_name in line_param_name:
line_name += ', ' if line_name != '' else ''
line_name += param_name + '=' + str(item[param_name])
line_name_list.append(line_name)
score_df['line_name'] = line_name_list
    # Plot score against x_param, with one line per combination of the remaining parameters
_, ax = plt.subplots(1,1)
for line_name in np.unique(line_name_list):
plot_df = score_df.query('line_name == "{}"'.format(line_name))
plot_df = plot_df.sort_values(x_param_name)
ax.plot(plot_df[x_param_name], plot_df['score'], '-o', label=line_name)
ax.set_title("Grid Search", fontsize=20, fontweight='bold')
ax.set_xlabel(x_param_name, fontsize=16)
ax.set_ylabel('CV Average LogLoss', fontsize=16)
ax.legend(loc="upper right", bbox_to_anchor=(1.4, 0.95, 0.5, .100), fontsize=10)
ax.grid('on')
def plot_rf_param_tuning_result(model_tuning, param_name):
    '''
    Plot the mean score for each value of a parameter
    Parameters
    ----------
    model_tuning :
        Result of tuning with GridSearchCV
    param_name : str
        Name of the parameter to aggregate over
    '''
score_df = get_grid_search_result(model_tuning)
    # Aggregate the mean score over the specified parameter
plot_df = score_df.groupby(param_name).mean()
plot_df = plot_df.sort_values(param_name)
    # Plot the mean score against the parameter values
_, ax = plt.subplots(1,1)
ax.plot(plot_df.index, plot_df['score'], '-o', label='average score')
ax.set_title("Grid Search: " + param_name, fontsize=20, fontweight='bold')
ax.set_xlabel(param_name, fontsize=16)
ax.set_ylabel('CV Average LogLoss', fontsize=16)
ax.legend(fontsize=10)
ax.grid('on') |
the-stack_0_23424 | from pandac.PandaModules import TextNode
from direct.gui.DirectGui import DirectFrame
from direct.gui.DirectGui import DirectButton
from direct.gui.DirectGui import DirectLabel
from direct.gui import DirectGuiGlobals
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.directnotify import DirectNotifyGlobal
class JellybeanRewardGui(DirectFrame):
    notify = DirectNotifyGlobal.directNotify.newCategory('JellybeanRewardGui')
PreCountdownDelay = 1.0
CountDownRate = 0.2
JarLabelTextColor = (0.95, 0.95, 0.0, 1.0)
JarLabelMaxedTextColor = (1.0, 0.0, 0.0, 1.0)
def __init__(self, doneEvent):
self.doneEvent = doneEvent
DirectFrame.__init__(self)
self.reparentTo(aspect2d)
self.setPos(0.0, 0.0, 0.16)
self.stash()
publicPartyGui = loader.loadModel('phase_4/models/parties/publicPartyGUI')
self.frame = DirectFrame(parent=self, geom=publicPartyGui.find('**/activities_background'), geom_pos=(-0.8, 0.0, 0.2), geom_scale=2.0, relief=None)
self.earnedLabel = DirectLabel(parent=self, relief=None, text=str(0), text_align=TextNode.ACenter, text_pos=(0.0, -0.07), text_scale=0.2, text_fg=(0.95, 0.95, 0.0, 1.0), text_font=ToontownGlobals.getSignFont(), textMayChange=True, image=DirectGuiGlobals.getDefaultDialogGeom(), image_scale=(0.33, 1.0, 0.33), pos=(-0.3, 0.0, 0.2), scale=0.9)
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
jarImage = purchaseModels.find('**/Jar')
self.jarLabel = DirectLabel(parent=self, relief=None, text=str(0), text_align=TextNode.ACenter, text_pos=(0.0, -0.07), text_scale=0.2, text_fg=JellybeanRewardGui.JarLabelTextColor, text_font=ToontownGlobals.getSignFont(), textMayChange=True, image=jarImage, scale=0.7, pos=(0.3, 0.0, 0.17))
purchaseModels.removeNode()
del purchaseModels
jarImage.removeNode()
del jarImage
self.messageLabel = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_wordwrap=16.0, text_scale=0.07, pos=(-0.52, 0.0, -0.1), textMayChange=True)
self.doubledJellybeanLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.PartyRewardDoubledJellybean, text_align=TextNode.ACenter, text_wordwrap=12.0, text_scale=0.09, text_fg=(1.0, 0.125, 0.125, 1.0), pos=(0.0, 0.0, -0.465), textMayChange=False)
self.doubledJellybeanLabel.hide()
self.closeButton = DirectButton(parent=self, relief=None, text=TTLocalizer.PartyJellybeanRewardOK, text_align=TextNode.ACenter, text_scale=0.065, text_pos=(0.0, -0.625), geom=(publicPartyGui.find('**/startButton_up'),
publicPartyGui.find('**/startButton_down'),
publicPartyGui.find('**/startButton_rollover'),
publicPartyGui.find('**/startButton_inactive')), geom_pos=(-0.39, 0.0, 0.125), command=self._close)
publicPartyGui.removeNode()
del publicPartyGui
self.countSound = base.loadSfx('phase_13/audio/sfx/tick_counter_short.mp3')
self.overMaxSound = base.loadSfx('phase_13/audio/sfx/tick_counter_overflow.mp3')
return
def showReward(self, earnedAmount, jarAmount, message):
JellybeanRewardGui.notify.debug('showReward( earnedAmount=%d, jarAmount=%d, ...)' % (earnedAmount, jarAmount))
self.earnedCount = earnedAmount
self.earnedLabel['text'] = str(self.earnedCount)
self.jarCount = jarAmount
self.jarMax = base.localAvatar.getMaxMoney()
self.jarLabel['text'] = str(self.jarCount)
self.jarLabel['text_fg'] = JellybeanRewardGui.JarLabelTextColor
self.messageLabel['text'] = message
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_DAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY_MONTH):
self.doubledJellybeanLabel.show()
else:
self.doubledJellybeanLabel.hide()
self.unstash()
taskMgr.doMethodLater(JellybeanRewardGui.PreCountdownDelay, self.transferOneJellybean, 'JellybeanRewardGuiTransferOneJellybean', extraArgs=[])
def transferOneJellybean(self):
if self.earnedCount == 0:
return
self.earnedCount -= 1
self.earnedLabel['text'] = str(self.earnedCount)
self.jarCount += 1
if self.jarCount <= self.jarMax:
self.jarLabel['text'] = str(self.jarCount)
elif self.jarCount > self.jarMax:
self.jarLabel['text_fg'] = JellybeanRewardGui.JarLabelMaxedTextColor
if self.jarCount <= self.jarMax:
base.playSfx(self.countSound)
else:
base.playSfx(self.overMaxSound)
taskMgr.doMethodLater(JellybeanRewardGui.CountDownRate, self.transferOneJellybean, 'JellybeanRewardGuiTransferOneJellybean', extraArgs=[])
def _close(self):
taskMgr.remove('JellybeanRewardGuiTransferOneJellybean')
self.stash()
messenger.send(self.doneEvent)
def destroy(self):
taskMgr.remove('JellybeanRewardGuiTransferOneJellybean')
del self.countSound
del self.overMaxSound
self.frame.destroy()
self.earnedLabel.destroy()
self.jarLabel.destroy()
self.messageLabel.destroy()
self.closeButton.destroy()
DirectFrame.destroy(self)
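# Illustrative usage sketch (only meaningful inside a running Panda3D/Toontown client,
# where base/aspect2d/taskMgr/messenger exist; the event name and amounts below are
# placeholders):
#
#   gui = JellybeanRewardGui('jellybeanRewardDone')
#   gui.showReward(earnedAmount=25, jarAmount=40, message='You earned jellybeans!')
#   # ... later, listen for 'jellybeanRewardDone' to react when the player closes it.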
|
the-stack_0_23425 | import sys
import numpy as np
from scipy import signal
import torch
import torch.nn as nn
from torch.autograd import Variable
from tqdm import trange
from . import sequences
from . import initialize
class TCM:
def __init__(self, n_seeds, n_motifs, motif_width, min_sites, max_sites, batch_size, half_length, fudge, alpha,
revcomp, tolerance, maxiter, erasewhole, cuda):
self.n_seeds = n_seeds
self.n_motifs = n_motifs
self.motif_width = motif_width
self.min_sites = min_sites
self.max_sites = max_sites
self.batch_size = batch_size
self.half_length = half_length
self.fudge = fudge
self.alpha = alpha
self.revcomp = revcomp
self.tolerance = tolerance
self.maxiter = maxiter
self.erasewhole = erasewhole
self.cuda = cuda
def fit(self, X, X_neg=None):
"""Fit the model to the data X. Discover n_motifs motifs.
Parameters
----------
X : {list of string sequences}
Training data.
Returns
-------
self : TCM
The fitted model.
"""
ppms_final = []
ppms_bg_final = []
fracs_final = []
n_sites_final = []
for i_motif in range(self.n_motifs):
N = len(X)
if X_neg is not None:
top_words = initialize.find_enriched_gapped_kmers(X, X_neg, self.half_length, 0,
self.motif_width - 2 * self.half_length,
self.alpha, self.revcomp, self.n_seeds)
print('Converting letter sequences to tensors')
X = sequences.encode(X, self.alpha)
X_seqs_onehot = X # Need to use one hot coded positive sequences later
if X_neg is not None:
# Need to use one hot coded negative sequences later
X_neg_seqs_onehot = sequences.encode(X_neg, self.alpha)
# Extract valid one-hot subsequences
X = sequences.get_onehot_subsequences(X, self.motif_width)
M, L, W = X.shape
if self.revcomp:
M *= 2
# Compute motif fractions seeds
min_sites = self.min_sites
min_frac = min_sites / M
if self.max_sites is None:
max_sites = N # Expect at most one motif occurrence per sequence by default
else:
max_sites = self.max_sites
max_frac = max_sites / M
fracs_seeds = np.geomspace(min_sites, max_sites, 5) / M
n_uniq_fracs_seeds = len(fracs_seeds)
fracs_seeds = np.repeat(fracs_seeds, self.n_seeds)
fracs_seeds = torch.from_numpy(fracs_seeds.astype(np.float32))
# Compute background frequencies
letter_frequency = X.sum(axis=(0,2))
if self.revcomp: # If reverse complements considered, complement letter frequencies set to same value
letter_frequency[[0, 3]] = letter_frequency[0] + letter_frequency[3]
letter_frequency[[1, 2]] = letter_frequency[1] + letter_frequency[2]
X = np.concatenate((X, X[:,::-1,::-1]), axis=0)
bg_probs = 1.0 * letter_frequency / letter_frequency.sum()
ppms_bg_seeds = bg_probs.reshape([1, L, 1]).repeat(
self.n_seeds * n_uniq_fracs_seeds, axis=0).astype(np.float32)
ppms_bg_seeds = torch.from_numpy(ppms_bg_seeds)
# Initialize PPMs
large_prob = 0.9
small_prob = (1 - large_prob) / (L - 1)
if X_neg is not None:
ppms_seeds = sequences.encode(top_words, self.alpha)
ppms_seeds = sequences.pad_onehot_sequences(ppms_seeds, W).astype(np.float32) * large_prob
for ppm in ppms_seeds:
ppm[:, ppm.sum(axis=0)==0] = bg_probs.reshape((L, 1))
ppms_seeds[ppms_seeds == 0] = small_prob
else:
ppms_seeds = X[0:self.n_seeds].astype(np.float32) * large_prob
ppms_seeds[ppms_seeds == 0] = small_prob
ppms_seeds = np.tile(ppms_seeds, (n_uniq_fracs_seeds, 1, 1))
ppms_seeds_original = ppms_seeds.copy()
ppms_seeds = torch.from_numpy(ppms_seeds)
# If using cuda, convert the three parameter tensors to cuda format
if self.cuda:
ppms_bg_seeds = ppms_bg_seeds.cuda()
ppms_seeds = ppms_seeds.cuda()
fracs_seeds = fracs_seeds.cuda()
ppms_bg_seeds = ppms_bg_seeds.expand(len(ppms_bg_seeds), L, W)
# Perform one On-line and one batch EM pass
ppms_seeds, ppms_bg_seeds, fracs_seeds = \
self._online_em(X, ppms_seeds, ppms_bg_seeds, fracs_seeds, 1)
ppms, ppms_bg, fracs = \
self._batch_em(X, ppms_seeds, ppms_bg_seeds, fracs_seeds, 1)
log_likelihoods = self._compute_log_likelihood(X, ppms, ppms_bg, fracs)
# Filter away all invalid parameter sets
# Removed the right-most filter since it was causing issues for some people
bool_mask = (log_likelihoods != np.inf) #& (fracs > min_frac) & (fracs < max_frac)
indices = torch.arange(0, len(bool_mask), 1).long()
if self.cuda:
indices = indices.cuda()
indices = indices[bool_mask]
log_likelihoods = log_likelihoods[indices]
ppms = ppms[indices]
ppms_bg = ppms_bg[indices]
fracs = fracs[indices]
ppms_seeds = ppms_seeds[indices]
# Select seed that yields highest log likelihood after one online and one batch EM passes
max_log_likelihoods, max_log_likelihoods_index = log_likelihoods.max(dim=0)
max_log_likelihoods_index = max_log_likelihoods_index.item() # Replaced [0] w/ .item() for PyTorch >= 0.4
word_seed_best = sequences.decode(
[ppms_seeds_original[max_log_likelihoods_index].round().astype(np.uint8)], self.alpha)[0]
print('Using seed originating from word: ' + word_seed_best)
ppm_best = ppms[[max_log_likelihoods_index]]
ppm_bg_best = ppms_bg[[max_log_likelihoods_index]]
frac_best = fracs[[max_log_likelihoods_index]]
# Refine the best seed with batch EM passes
ppm_best, ppm_bg_best, frac_best = \
self._batch_em(X, ppm_best, ppm_bg_best, frac_best, self.maxiter)
ppms_final.append(ppm_best[0].cpu().numpy())
ppms_bg_final.append(ppm_bg_best[0].cpu().numpy())
fracs_final.append(frac_best[0])
n_sites_final.append(int(M * fracs_final[-1]))
if self.erasewhole:
print('Removing sequences containing at least one motif occurrence')
X = self._erase_seqs_containing_motifs(X_seqs_onehot, ppms_final[-1], ppms_bg_final[-1],
fracs_final[-1])
if X_neg is not None:
X_neg = self._erase_seqs_containing_motifs(X_neg_seqs_onehot, ppms_final[-1], ppms_bg_final[-1],
fracs_final[-1])
else:
print('Removing individual occurrences of motif occurrences')
X = self._erase_motif_occurrences(X_seqs_onehot, ppms_final[-1], ppms_bg_final[-1], fracs_final[-1])
if X_neg is not None:
X_neg = self._erase_motif_occurrences(X_neg_seqs_onehot, ppms_final[-1], ppms_bg_final[-1],
fracs_final[-1])
self.ppms_ = ppms_final
self.ppms_bg_ = ppms_bg_final
self.fracs_ = fracs_final
self.n_sites_ = n_sites_final
return X, X_neg
def _batch_em(self, X, ppms, ppms_bg, fracs, epochs):
M, L, W = X.shape
n_filters = len(ppms)
m_log_ratios = nn.Conv1d(L, n_filters, W, stride=W, bias=False)
fracs = fracs.view((1, n_filters, 1))
pfms = torch.zeros((n_filters, L, W))
pfms_bg = torch.zeros((n_filters, L, W))
counts = torch.zeros((n_filters, 1))
if self.cuda:
m_log_ratios.cuda()
pfms = pfms.cuda()
pfms_bg = pfms_bg.cuda()
counts = counts.cuda()
pbar_epoch = trange(0, epochs, 1, desc='Batch EM')
for i in pbar_epoch:
old_ppms = ppms
# E-step, compute membership weights and letter frequencies
pfms.zero_()
pfms_bg.zero_()
counts.zero_()
m_log_ratios.weight.data = torch.log(ppms) - torch.log(ppms_bg)
fracs_ratio = fracs / (1 - fracs)
for j in trange(0, M, self.batch_size, desc='Pass %i/%i' % (i + 1, epochs)):
batch = X[j:j + self.batch_size]
x = Variable(torch.from_numpy(batch).float())
if self.cuda:
x = x.cuda()
log_ratios = m_log_ratios(x).data
ratios = torch.exp(log_ratios)
c = self.fudge * fracs_ratio * ratios
state_probs = c / (1 + c)
counts.add_(state_probs.sum(dim=0))
batch_motif_matrix_counts = (state_probs.unsqueeze(-1) *
x.data.unsqueeze(1)).sum(dim=0)
pfms.add_(batch_motif_matrix_counts)
pfms_bg.add_(x.data.sum(dim=0).unsqueeze(0) - batch_motif_matrix_counts)
# M-step, update parameters
fracs = (counts / M).unsqueeze(0)
ppms = pfms / counts.unsqueeze(2)
ppms_bg = (pfms_bg.sum(dim=-1) /
(W * (M - counts))).unsqueeze(2).expand(n_filters, L, W)
ppms_diff_norm = (ppms - old_ppms).view(n_filters, -1).norm(p=2, dim=1)
max_ppms_diff_norm = ppms_diff_norm.max()
if max_ppms_diff_norm < self.tolerance:
pbar_epoch.set_description('Batch EM - convergence reached')
break
fracs = fracs.view(-1)
return ppms, ppms_bg, fracs
def _online_em(self, X, ppms, ppms_bg, fracs, epochs):
M, L, W = X.shape
n_filters = len(ppms)
m_log_ratios = nn.Conv1d(L, n_filters, W, stride=W, bias=False)
fracs = fracs.view((1, n_filters, 1))
# On-line EM specific-parameters
gamma_0 = 0.5
alpha = 0.85
s_0 = fracs.clone()[0].unsqueeze(-1)
s_1 = s_0 * ppms
s_1_bg = (1 - s_0) * ppms_bg
k = 0
indices = np.random.permutation(M)
if self.cuda:
m_log_ratios.cuda()
s_0 = s_0.cuda()
s_1 = s_1.cuda()
s_1_bg = s_1_bg.cuda()
pbar_epoch = trange(0, epochs, 1, desc='On-line EM')
for i in pbar_epoch:
old_ppms = ppms
for j in trange(0, M, self.batch_size, desc='Pass %i/%i' % (i + 1, epochs)):
k += 1
m_log_ratios.weight.data = torch.log(ppms) - torch.log(ppms_bg)
fracs_ratio = fracs / (1 - fracs)
# E-step, compute membership weights and letter frequencies for a batch
batch = X[indices[j:j + self.batch_size]]
actual_batch_size = len(batch)
gamma = 1.0 * actual_batch_size / self.batch_size * gamma_0 / (k ** alpha)
x = Variable(torch.from_numpy(batch).float())
if self.cuda:
x = x.cuda()
log_ratios = m_log_ratios(x).data
ratios = torch.exp(log_ratios)
c = self.fudge * fracs_ratio * ratios
state_probs = c / (1 + c)
s_0_temp = state_probs.mean(dim=0).unsqueeze(-1)
s_1_temp = (state_probs.unsqueeze(-1) *
x.data.unsqueeze(1)).mean(dim=0)
s_1_bg_temp = x.data.mean(dim=0).unsqueeze(0) - s_1_temp
# M-step, update parameters based on batch
s_0.add_(gamma * (s_0_temp - s_0))
s_1.add_(gamma * (s_1_temp - s_1))
s_1_bg.add_(gamma * (s_1_bg_temp - s_1_bg))
fracs = s_0.view((1, n_filters, 1))
ppms = s_1 / s_0
ppms_bg = (s_1_bg / (1 - s_0)).mean(-1, keepdim=True).expand((n_filters, L, W))
ppms_diff_norm = (ppms - old_ppms).view(n_filters, -1).norm(p=2, dim=1)
max_ppms_diff_norm = ppms_diff_norm.max()
if max_ppms_diff_norm < self.tolerance:
pbar_epoch.set_description('On-line EM - convergence reached')
break
fracs = fracs.view(-1)
return ppms, ppms_bg, fracs
def _compute_log_likelihood(self, X, ppms, ppms_bg, fracs):
M, L, W = X.shape
n_filters = len(ppms)
m_log_ppms_bg = nn.Conv1d(L, n_filters, W, bias=False)
m_log_ppms_bg.weight.data = torch.log(ppms_bg)
m_log_ratios = nn.Conv1d(L, n_filters, W, bias=False)
m_log_ratios.weight.data = torch.log(ppms) - torch.log(ppms_bg)
fracs = fracs.view((1, n_filters, 1))
log_likelihoods = torch.zeros(n_filters)
fracs_ratio = fracs / (1 - fracs)
log_fracs_bg = torch.log(1 - fracs)
if self.cuda:
m_log_ppms_bg.cuda()
log_likelihoods = log_likelihoods.cuda()
m_log_ratios.cuda()
for j in trange(0, M, self.batch_size, desc='Computing log likelihood'):
batch = X[j:j + self.batch_size]
x = Variable(torch.from_numpy(batch).float())
if self.cuda:
x = x.cuda()
ppms_bg_logprob = m_log_ppms_bg(x).data
log_ratios = m_log_ratios(x).data
ratios = torch.exp(log_ratios)
# Added back self.fudge here, since this is the quantity that EM is technically optimizing
log_likelihoods.add_((log_fracs_bg + ppms_bg_logprob +
torch.log(1 + self.fudge * fracs_ratio * ratios)).sum(dim=0).view(-1))
return log_likelihoods
def _erase_motif_occurrences(self, seqs_onehot, ppm, ppm_bg, frac):
t = np.log((1 - frac) / frac) # Threshold
spec = np.log(ppm) - np.log(ppm_bg) # spec matrix
spec_revcomp = spec[::-1, ::-1]
L, W = ppm.shape
for i in trange(0, len(seqs_onehot), 1, desc='Erasing motif occurrences'):
s = seqs_onehot[i] # grab the one hot coded sequence
seqlen = s.shape[1]
if seqlen < W: # leave short sequences alone
continue
indices = np.arange(seqlen - W + 1)
conv_signal = signal.convolve2d(spec, s, 'valid')[0]
seq_motif_sites = indices[conv_signal > t]
if self.revcomp:
conv_signal_revcomp = signal.convolve2d(spec_revcomp, s, 'valid')[0]
seq_motif_sites_revcomp = indices[conv_signal_revcomp > t]
seq_motif_sites = np.concatenate((seq_motif_sites, seq_motif_sites_revcomp))
for motif_site in seq_motif_sites:
s[:, motif_site:motif_site+W] = 0
seqs = sequences.decode(seqs_onehot, self.alpha)
return seqs
def _erase_seqs_containing_motifs(self, seqs_onehot, ppm, ppm_bg, frac):
t = np.log((1 - frac) / frac) # Threshold
spec = np.log(ppm) - np.log(ppm_bg) # spec matrix
spec_revcomp = spec[::-1, ::-1]
L, W = ppm.shape
seqs_onehot_filtered = []
for i in trange(0, len(seqs_onehot), 1, desc='Removing sequences with motif occurrences'):
s = seqs_onehot[i] # grab the one hot coded sequence
if s.shape[1] < W: # leave short sequences alone
seqs_onehot_filtered.append(s)
continue
conv_signal = signal.convolve2d(spec, s, 'valid')[0]
s_has_motif = np.any(conv_signal > t)
if self.revcomp:
conv_signal_revcomp = signal.convolve2d(spec_revcomp, s, 'valid')[0]
s_has_motif = s_has_motif or np.any(conv_signal_revcomp > t)
if not s_has_motif:
seqs_onehot_filtered.append(s)
seqs = sequences.decode(seqs_onehot_filtered, self.alpha)
return seqs
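# Illustrative usage sketch (parameter values are placeholders; `seqs`/`seqs_neg`
# would be lists of DNA strings and `alpha` the alphabet string expected by the
# local `sequences` module):
#
#   tcm = TCM(n_seeds=100, n_motifs=1, motif_width=12, min_sites=10, max_sites=None,
#             batch_size=1000, half_length=4, fudge=1.0, alpha='ACGT', revcomp=True,
#             tolerance=1e-3, maxiter=50, erasewhole=False, cuda=False)
#   seqs_left, seqs_neg_left = tcm.fit(seqs, seqs_neg)
#   ppm = tcm.ppms_[0]   # learned position probability matrix for the first motif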
|
the-stack_0_23426 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import TestCase
from liminal.runners.airflow.tasks.defaults import job_end
from tests.util import dag_test_utils
# noinspection DuplicatedCode
class TestJobEndTask(TestCase):
def test_apply_task_to_dag(self):
dag = dag_test_utils.create_dag()
task0 = job_end.JobEndTask(
dag,
{'metrics': {'namespace': 'EndJobNameSpace', 'backends': ['cloudwatch']}},
{'pipeline': 'my_end_pipeline'},
{},
None,
'all_done'
)
task0.apply_task_to_dag()
self.assertEqual(len(dag.tasks), 1)
dag_task0 = dag.tasks[0]
self.assertEqual(dag_task0.namespace, 'EndJobNameSpace')
self.assertEqual(dag_task0.backends, ['cloudwatch'])
self.assertEqual(dag_task0.task_id, 'end')
def test_apply_task_to_dag_missing_metrics(self):
conf = {'pipeline': 'my_pipeline'}
dag = dag_test_utils.create_dag()
task0 = job_end.JobEndTask(dag, {}, {'pipeline': 'my_end_pipeline'}, conf, None, 'all_done')
task0.apply_task_to_dag()
self.assertEqual(len(dag.tasks), 1)
dag_task0 = dag.tasks[0]
self.assertEqual(dag_task0.namespace, '')
self.assertEqual(dag_task0.backends, [])
self.assertEqual(dag_task0.trigger_rule, 'all_done')
def test_apply_task_to_dag_with_partial_configuration(self):
dag = dag_test_utils.create_dag()
task0 = job_end.JobEndTask(dag,
{'metrics': {'namespace': 'EndJobNameSpace'}},
{'pipeline': 'my_end_pipeline'},
{},
None,
'all_done')
task0.apply_task_to_dag()
self.assertEqual(len(dag.tasks), 1)
dag_task0 = dag.tasks[0]
self.assertEqual(dag_task0.namespace, 'EndJobNameSpace')
self.assertEqual(dag_task0.backends, [])
self.assertEqual(dag_task0.trigger_rule, 'all_done')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_23428 | class Solution:
def minDistance(self, word1: str, word2: str) -> int:
m = len(word1)
n = len(word2)
# dp[i][j] := min # of operations to convert word1[0..i) to word2[0..j)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
dp[i][0] = i
for j in range(1, n + 1):
dp[0][j] = j
for i in range(1, m + 1):
for j in range(1, n + 1):
if word1[i - 1] == word2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1
return dp[m][n]
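# Minimal self-check (the two word pairs below are illustrative, not from the
# original file); each assert states the expected minimum edit distance.
if __name__ == "__main__":
    assert Solution().minDistance("horse", "ros") == 3        # horse -> rorse -> rose -> ros
    assert Solution().minDistance("intention", "execution") == 5
    print("minDistance self-checks passed")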
|
the-stack_0_23429 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import random
from op_test import OpTest
class TestSeqProject(OpTest):
def setUp(self):
self.init_test_case()
self.op_type = 'sequence_conv'
if self.context_length == 1 \
and self.context_start == 0 \
and self.padding_trainable:
print("If context_start is 0 " \
"and context_length is 1," \
" padding_trainable should be false.")
return
# one level, batch size
x = np.random.uniform(0.1, 1, [self.input_size[0],
self.input_size[1]]).astype('float32')
w = np.random.uniform(0.1, 1, [
self.context_length * self.input_size[1], self.output_represention
]).astype('float32')
begin_pad = np.max([0, -self.context_start])
end_pad = np.max([0, self.context_start + self.context_length - 1])
total_pad = begin_pad + end_pad
padding_data = np.random.uniform(
0.1, 1, [total_pad, self.input_size[1]]).astype('float32')
self.pad_data = padding_data
self.inputs = {
'X': (x, self.lod),
'Filter': w,
}
self.inputs_val = ['X', 'Filter']
self.inputs_val_no_x = ['Filter']
self.inputs_val_no_f = ['X']
if total_pad != 0:
self.inputs['PaddingData'] = padding_data
self.inputs_val = ['X', 'PaddingData', 'Filter']
self.inputs_val_no_x = ['PaddingData', 'Filter']
self.inputs_val_no_f = ['PaddingData', 'X']
self.attrs = {
'contextStart': self.context_start,
'contextLength': self.context_length,
'paddingTrainable': self.padding_trainable,
'contextStride': self.context_stride
}
out = np.zeros(
(self.input_size[0], self.output_represention)).astype('float32')
self.outputs = {'Out': out}
self.compute()
def compute(self):
x, lod = self.inputs['X']
filter = self.inputs['Filter']
pading_data = self.pad_data
out = np.zeros((self.input_size[0], self.context_length *
self.input_size[1])).astype('float32')
offset = [0]
for seq_len in lod[0]:
offset.append(offset[-1] + seq_len)
begin_pad = np.max([0, -self.context_start])
for i in range(len(offset) - 1):
for j in range(self.context_length):
in_begin = offset[i] + self.context_start + j
in_end = offset[i + 1] + self.context_start + j
out_begin = offset[i]
out_end = offset[i + 1]
if in_begin < offset[i]:
pad_size = np.min(
[offset[i] - in_begin, offset[i + 1] - offset[i]])
if self.padding_trainable:
sub_w = pading_data[j:j + pad_size, :]
out[offset[i]:offset[i] + pad_size, j * self.input_size[
1]:(j + 1) * self.input_size[1]] = sub_w
out_begin = offset[i] + pad_size
in_begin = offset[i]
if in_end > offset[i + 1]:
pad_size = np.min(
[in_end - offset[i + 1], offset[i + 1] - offset[i]])
if self.padding_trainable:
sub_w = pading_data[begin_pad + self.context_start + j -
pad_size:begin_pad +
self.context_start + j, :]
out[offset[i + 1] - pad_size:offset[i + 1], j * self.
input_size[1]:(j + 1) * self.input_size[1]] = sub_w
in_end = offset[i + 1]
out_end = offset[i + 1] - pad_size
if in_end <= in_begin:
continue
in_sub = x[in_begin:in_end, :]
out[out_begin:out_end, j * self.input_size[1]:(j + 1) *
self.input_size[1]] += in_sub
np.dot(out, filter, out=self.outputs['Out'])
def test_check_output(self):
self.check_output()
def test_check_grad(self):
if self.padding_trainable:
self.check_grad(
set(self.inputs_val), 'Out', max_relative_error=0.05)
def test_check_grad_input(self):
self.check_grad(
['X'],
'Out',
max_relative_error=0.05,
no_grad_set=set(self.inputs_val_no_x))
def test_check_grad_padding_data(self):
if self.padding_trainable:
self.check_grad(
['PaddingData'],
'Out',
max_relative_error=0.05,
no_grad_set=set(['X', 'Filter']))
def test_check_grad_Filter(self):
self.check_grad(
['Filter'],
'Out',
max_relative_error=0.05,
no_grad_set=set(self.inputs_val_no_f))
def test_check_grad_input_filter(self):
if self.padding_trainable:
self.check_grad(
['X', 'Filter'],
'Out',
max_relative_error=0.05,
no_grad_set=set(['PaddingData']))
def test_check_grad_padding_input(self):
if self.padding_trainable:
self.check_grad(
self.inputs_val_no_f,
'Out',
max_relative_error=0.05,
no_grad_set=set(['Filter']))
def test_check_grad_padding_filter(self):
if self.padding_trainable:
self.check_grad(
self.inputs_val_no_x,
'Out',
max_relative_error=0.05,
no_grad_set=set(['X']))
def init_test_case(self):
self.input_row = 11
self.context_start = 0
self.context_length = 1
self.padding_trainable = False
self.context_stride = 1
self.input_size = [self.input_row, 23]
offset_lod = [[0, 4, 5, 8, self.input_row]]
self.lod = [[]]
# convert from offset-based lod to length-based lod
for i in range(len(offset_lod[0]) - 1):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
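        # e.g. with input_row=11: offset_lod [[0, 4, 5, 8, 11]] -> length-based lod [[4, 1, 3, 3]]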
self.output_represention = 8 # output feature size
class TestSeqProjectCase1(TestSeqProject):
def init_test_case(self):
self.input_row = 11
self.context_start = -1
self.context_length = 3
self.padding_trainable = True
self.context_stride = 1
self.input_size = [self.input_row, 23]
offset_lod = [[0, 4, 5, 8, self.input_row]]
self.lod = [[]]
# convert from offset-based lod to length-based lod
for i in range(len(offset_lod[0]) - 1):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
self.output_represention = 8 # output feature size
class TestSeqProjectCase2(TestSeqProject):
def init_test_case(self):
self.input_row = 25
self.context_start = 2
self.context_length = 3
self.padding_trainable = True
self.context_stride = 1
self.input_size = [self.input_row, 23]
idx = list(range(self.input_size[0]))
del idx[0]
offset_lod = [[0] + np.sort(random.sample(idx, 8)).tolist() +
[self.input_size[0]]]
self.lod = [[]]
# convert from offset-based lod to length-based lod
for i in range(len(offset_lod[0]) - 1):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
self.output_represention = 8 # output feature size
if __name__ == '__main__':
unittest.main()
|
the-stack_0_23430 |
#!/usr/bin/python
# # ^^^^^^ -- indicates parameters to change
import numpy as np
import pylab as pl
import pywcs
import pickle
##
#from matplotlib.patches import Rectangle,Circle
##
pxscale = 0.2507 / 2. # unbinned
dcr = 4. / 60. # radius of field (deg)
#dcr=3.9/60. # radius of field (deg)
# shrink radius slightly to avoid part of slit being drawn outside of mask circle?
iplot = 0 #1
idebug = 0
# ^^^^^^
#niter=10
# global CCD parameters:
ccd_dx = 2034.
ccd_xgap = 70.
ccd_dy = 4102.
ccd_cx = (2. * (ccd_dx + ccd_xgap) +ccd_dx) / 2.
ccd_cy = ccd_dy / 2.
def RSSpixfromSky(cra,cdec,rotang,ra,dec,equinox):
# -- Make a WCS to convert to RSS pixel coords
# Create a new WCS object. The number of axes must be set
# from the start
wcs = pywcs.WCS(naxis=2)
# wcs.wcs.crpix = [3000.,2000.] # made up !! ****
wcs.wcs.crpix = [ccd_cx,ccd_cy]
#wcs.wcs.cdelt = np.array([-pxscale, pxscale])
wcs.wcs.cdelt = np.array([-pxscale, pxscale])/3600.
wcs.wcs.crval = [cra, cdec]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
# wcs.wcs.crota = [rotang, rotang] #** CHECK **
wcs.wcs.crota = [-rotang, -rotang] # rotate SKY this amount?
# [This is consistent with original RSMT angle definition]
wcs.wcs.equinox=equinox
# wcs.wcs.print_contents()
xpix,ypix = wcs.wcs_sky2pix(ra,dec, 1)
return xpix,ypix
def defineSpec(xobj, yobj, grating, filt, slen):
# -- Use optical model to obtain wavelength limits (converted to pixels)
# based on grating and filter selected. For now just make something up
#xcen=3000. #**** made-up!
xspec = xobj #(xobj-xcen)+(-0.2)*(xobj-xcen)+xcen # make up anamorphic factor
yspec = yobj # make up anamorphic factor
#^^^^^^^^
# change by hand for grating+filter - will be set automatically eventually
#xlo=400. # spectral length to left of slit (unbinned pix)
#xhi=150. # ---"" ---- right ---""--
xlo = 3000.
xhi = 3000.
# -- start off with ylo/yhi half slit length
yhi = slen / 2.
ylo = slen / 2.
return xspec,yspec,xlo,xhi,ylo,yhi
def MonteRealise(x0, y0, x1, y1, pri):
# ---- Monte Carlo assignment of weights, one realisation
nums = np.random.random(np.shape(x0))
keep = np.zeros(np.shape(x0)).astype('int')
ok = (nums < pri).nonzero()
keep[ok] = 1
return keep
def ResolveCollisions(xspe, yspe ,x0, y0, x1, y1, tkeep, cx, cy, xpad, ypad, \
allocated, prisort):
# ---- Make a first pass mask starting with object closest to central
# and resolving collisions
#**** check aboot skipping pri=-1. (setup star) obejcts ****
#^^^^^^^^
#idebug=0#1
ivertdis=1 # use vertical distance (best for one tier of spectra)
# otherwise use radial distance when distance sorting (best for multi-tier?)
# if prisort: print "::::::::",pri
# if prisort: print "::::::::",tkeep
if prisort: # keep is priority instead of keepflag, sort by this
tpri = tkeep
ss = (np.argsort(tpri))[::-1] # reverse sort
if idebug: print(tpri[ss])
tkeep = np.ones(np.shape(tpri)).astype('int')
rej = np.reshape((tpri == 0.).nonzero(), -1)
tkeep[rej] = 0
else:
if ivertdis:
dis2 = (yspe-cy)**2
ss = np.argsort(dis2)
else:
dis2 = (xspe-cx)**2 + (yspe-cy)**2
ss = np.argsort(dis2)
tx0 = x0[ss] - xpad / 2.
tx1 = x0[ss] + x1[ss] + xpad / 2. # [ careful switching between lower left and width and lower left and upper right notation ]
ty0 = y0[ss] - ypad / 2.
ty1 = y0[ss] + y1[ss] + ypad / 2.
skeep =tkeep[ss].copy()
inmask = np.zeros(np.shape(x0)).astype('int')
# -- add in already allocated slits
ok = np.reshape((allocated[ss] == 1).nonzero(), -1)
inmask[ok] = 1
# --
for ii in range(np.size(x0)):
if skeep[ii] == 0:
olap = 0
continue
if idebug: print(ii)
# keep is either priority or always 1, depending on mode
# if skeep[ii] <= 0: continue # don't care about pri=0. objects or collisions of ref stars with others
if prisort:
if tpri[ss[ii]] <= 0:
continue # don't care about pri=0. objects or collisions of ref stars with others
#--
# if inmask[ii]==0 and keep[ii]<0.: continue # don't add a new setup star
#--
if inmask[ii] == 1:
if idebug: print(ii,' already in mask')
continue # no need to check if this slit collides
used=np.reshape((inmask == 1).nonzero(), -1)
if idebug:
print(np.size(used),' slits currently in mask')
olap = 0
for jj in range(np.size(used)):
# -- check if this rectangle overlaps with any currently in mask:
if idebug:
print('comparing slit ',ii,' with ',used[jj])
# http://tech-read.com/2009/02/06/program-to-check-rectangle-overlapping/
r1x1 = tx0[ii]
r1x2 = tx1[ii]
r1y1 = ty0[ii]
r1y2 = ty1[ii]
r2x1 = tx0[used[jj]]
r2x2 = tx1[used[jj]]
r2y1 = ty0[used[jj]]
r2y2 = ty1[used[jj]]
# isOVerlap= ((r1x2 >= r2x1) &&
# (r1y2 >= r2y1) &&
# (r1x1 <= r2x2) &&
# (r1y1 <= r2y2));
#print np.shape(r1x2),np.shape(r2x1)
if ((r1x2 >= r2x1) and \
(r1y2 >= r2y1) and \
(r1x1 <= r2x2) and \
(r1y1 <= r2y2)) : olap=1
# else: olap=0
if idebug:
print(r1y1,r1y2,r2y1,r2y2)
#if (r1y2 >= r2y1) and (r1y1 <= r2y2) : olap=1
if idebug:
print('olap: ',olap)
# as soon as this slit overlaps with any one already in mask, we can reject it
if olap == 1:
break # hopefully just exits this inner loop
# if we checked against all slits on mask and no collisions, then keep:
if olap==0:
inmask[ii]=1
# if prisort:
# print "adding new slit:"
# print ii,skeep[ii],ty1[ii]-ty0[ii]
# print ii,tpri[ss[ii]],ty1[ii]-ty0[ii]
# print
#*** careful at end. inmask index is aligned with sorted-distance coords ***
keepflag = np.zeros(np.shape(x0))
ok = np.reshape((inmask == 1).nonzero(),-1)
keepflag[ss[ok]] = 1
return keepflag
def firstpass(Nstars_req, xspe, yspe, x0, y0, x1, y1, keep, cx, cy, xpad, \
ypad, pri):
allocated=np.zeros(np.shape(x0)).astype('int') # none already allocated
# -- Now add setup *before* any science slits, otherwise each star wipes out
# a vertical strip equivalent to the length of a science slit!
allocated=setupStars(Nstars_req, xspe, yspe, x0, y0, x1, y1, pri, cx, cy, \
xpad, ypad, allocated)
res=ResolveCollisions(xspe, yspe, x0, y0, x1, y1, keep, cx, cy, xpad, \
ypad, allocated,0)
return res
def addmore(xspe, yspe, x0, y0, x1, y1, pri, cx, cy, xpad, ypad, in1stmask):
# -- add more slits from full list (pri>0) after firstpass:
# keep=np.zeros(np.size(pri))
# ok=np.reshape((pri>0.).nonzero(),-1)
# keep[ok]=1
# sort by priority, so when turning off sorting, highest priorities are assigned first
res=ResolveCollisions(xspe, yspe, x0, y0, x1, y1, pri, cx, cy, xpad, ypad, \
in1stmask, 1)
return res
def tweakslits(xspe, yspe, x0, y0, x1, y1, pri, cx, cy, xpad, ypad, inprevmask):
# ******** TODO ********
# -- add more slits from full list (pri>0) after firstpass:
# keep=np.zeros(np.size(pri))
# ok=np.reshape((pri>0.).nonzero(),-1)
# keep[ok]=1
# sort by priority, so when turning off sorting, highest priorities are assigned first
y1sh = y1 * 0.8
y0sh = y0 + 0.1 * y1 # make 80% slit length, but keep same centre
res = ResolveCollisions(xspe, yspe, x0, y0sh ,x1, y1sh, pri, cx, cy, xpad, \
ypad, inprevmask, 1)
return res
def bestStars(sx,sy):
# choose "optimal" stars from a larger list.
# take 4-6 with best distribution across field
# assume all have already been pre-selected to lie in a suitable mag range
#**** dummy - RANDOM ****
inds = np.random.random(np.shape(sx))
outind = np.argsort(inds)[0:6]
# print outind
return outind
def setupStars(Nstars_req,xspe,yspe,x0,y0,x1,y1,pri,cx,cy,xpad,ypad,inprevmask):
# for now just do this in a dumb way. Design the science mask first,
# then throw in setup stars, removing science slits where collisions occur.
# Probably best to do this after "firstmask" and before "addmore"
# **** NOTE: need to deal with "musthave" objects correctly here and in
# firstmask, etc. ****
# -- This should run like a simplified version of ResolveCollisions
# check initial star list. This should be objects with priority=-1
#****
# (i think this will just work naturally in the science slit allocation)
#****
# print np.sum(inprevmask).astype('int'),"slits before setup stars added"
stars = np.reshape((pri==-1.).nonzero(),-1)
nstars = np.size(stars)
# print 'NSTARS = ',nstars
# if nstars < 4:
# print "NO SETUP STARS IN CATALOGUE"
# print "You must select these manually"
# return inprevmask
#if nstars > 6:
if nstars > Nstars_req:
# select best stars:
tusestars = bestStars(x0[stars],y0[stars])
usestars = stars[tusestars]
nstars = np.size(usestars)
else:
usestars = stars
# print 'N_USESTARS = ',np.size(usestars)
# print usestars
# == Setup stars use 1" diameter holes
# reset width and length of these slit
#x1[usestars]=1./pxscale # spectral length not slit width??
#y1[usestars]=1./pxscale
# ** set in input catalogue now **
skeep = inprevmask
skeep[usestars] = 1 # add stars to mask
#-- increase priority so stars override science objects
# print pri
# print (pri==99).nonzero()
pri[usestars] = 10.
# print (pri==10).nonzero()
#--
tx0 = x0 - xpad / 2.
tx1 = x0 + x1 + xpad / 2. # [ careful switching between lower left and width and lower left and upper right notation ]
ty0 = y0 - ypad / 2.
ty1 = y0 + y1 + ypad / 2.
# for each science slit already in mask, check if it hits a setup star
# if so, remove the former:
for ii in range(np.size(x0)):
if inprevmask[ii] != 1: # only care about collisions with allocated objects
continue
if pri[ii] == 10:
continue # this is a star itself!
for jj in range(nstars):
# -- check if this rectangle overlaps with any currently in mask:
if idebug: print('comparing slit ',ii,' with ',usestars[jj])
r1x1 = tx0[ii]
r1x2 = tx1[ii]
r1y1 = ty0[ii]
r1y2 = ty1[ii]
r2x1 = tx0[usestars[jj]]
r2x2 = tx1[usestars[jj]]
r2y1 = ty0[usestars[jj]]
r2y2 = ty1[usestars[jj]]
if ((r1x2 >= r2x1) and \
(r1y2 >= r2y1) and \
(r1x1 <= r2x2) and \
(r1y1 <= r2y2)) :
skeep[ii] = 0 #olap=1
continue # once collided, no need to consider further
# else: olap=0
if idebug: print(r1y1,r1y2,r2y1,r2y2)
if idebug: print("removing science slit due to setup star collision")
if idebug:
if skeep[ii] == 0: print("removing science slit due to setup star collision")
# summarise results
# print np.sum(skeep).astype('int'),"slits afterwards, with ",nstars,"setup stars"
# print (pri==10).nonzero()
# print skeep[np.reshape((pri==10).nonzero(),-1)]
pri[np.reshape((pri == 10).nonzero(), -1)] = -1. # this is a global variable, so need to rest after each iter!
return skeep
# ==============================================================================
# ---- design an RSS mask
# added option to select Nstars_req from pri=-1. objects.
# needs to be some external check that this is not larger than number in catalogue!
def pyslit_optimize(cra, cdec, rotang, equinox, ra, dec, pri, slen_as, swid_as, \
stilt, Niter, opt_ypad, Nstars_req=0):
slen = slen_as / pxscale
swid = swid_as / pxscale
# ---- Convert sky coords to RSS pixel coords (create dummy WCS for now)
# These are coordinates for the object. Need to keep separate positions for the ref. wavelength under slit position due to anamorphic magnification
xobj, yobj = RSSpixfromSky(cra, cdec, rotang, ra, dec, equinox)
# ---- set-up properties of spectra in pixel coords
# need lengths of spectra left and right, above and below object
grating = 'fred'
filt = 'harry'
xspe, yspe, xle, xri, yab, ybe = defineSpec(xobj, yobj, grating, filt, slen)
# **** Need to add a check for desired wavelength range staying on detector ****
x0 = xspe - xle
x1 = np.zeros(np.size(x0)) + xle + xri
y0 = yspe - ybe
y1 = ybe + yab
#for kk in np.arange(np.size(x0)):
# if pri[kk]<1.0: col='r'
# if pri[kk]<0.7: col='b'
# if pri[kk]<0.5: col='g'
#
# rect=Rectangle((x0[kk],y0[kk]),x1[kk],y1[kk],color='none',ec=col)
# ax.add_patch(rect)
#pl.draw()
# /////
# pri=-1 -- setup star
# specify nsetupstars. if there are more p=-1 objs than this, choose in some optimum way
#
# pri=9 is a must-have object which specifies point in mask to sort in distance from when optimising (not nec. cra,cdec)
# if not set, sort from centre of mask
# other must-have objects just p=1.
#////
#***need to set safety margins too. just extend slit length and shrink back later?
# -- construct first pass realisation, just keep subsample of full object list
# based on priorities. No checking for slit collisions yet.
maxwt = 0
for mm in range(Niter):
# print 'iter:',(mm+1)
keep = MonteRealise(x0, y0, x1, y1, pri)
# -- Now, order these by distance from desired object and remove colliding slits
# -- centre of mask if not set to specific object
#cx = 3000.
#cy = 2000.
cx = ccd_cx
cy = ccd_cy
xpad = 1. / pxscale # arcsec
ypad = opt_ypad / pxscale
in1mask = firstpass(Nstars_req, xspe, yspe, x0, y0, x1, y1, keep, cx, \
cy, xpad, ypad, pri)
ok = np.reshape((pri > 0.).nonzero(), -1)
# print "First pass allocated",np.sum(in1mask[ok]).astype('int'),"science slits"\
# ' with a total weight of %.1f'%np.sum(in1mask[ok]*pri[ok])
# ' with a total weight of %.1f'%np.sum(in1mask*pri)
#--
## inwstars=setupStars(xspe,yspe,x0,y0,x1,y1,pri,cx,cy,xpad,ypad,in1mask)
## in1mask=inwstars
# sss=np.reshape( ((inwstars == 1) & (pri==-1)).nonzero() ,-1)
# print '>>> after setupStars ',np.size(sss)
#--
# -- add more slits from the full list (pri>0.) before 1st MC weights were applied
bssss = np.reshape( ((in1mask == 1) & (pri==-1)).nonzero() ,-1)
# print '<<< before admore ',np.size(bssss)
cumul = 0.
inmask1 = in1mask
# print pri
cumul = addmore(xspe,yspe,x0,y0,x1,y1,pri,cx,cy,xpad,ypad,inmask1)
## **** don't try 2nd iteration for now. for some reason this adds lots of extra setup stars! ****
# cumul=in1mask
##
ssss = np.reshape( ((cumul == 1) & (pri==-1)).nonzero() ,-1)
# print '>>> after admore ',np.size(ssss)
ok = np.reshape((pri > 0.).nonzero(), -1)
# print "After second pass: ",np.sum(cumul[ok]).astype('int'),"science slits"\
# ' with a total weight of %.1f'%np.sum(cumul[ok]*pri[ok])
totwt = np.sum(cumul * pri)
if totwt > maxwt:
maxwt = totwt
# -- write results for this realisation
# only need to write indices of slits in mask:
output = open('data.pkl', 'wb')
pickle.dump(cumul,output)
output.close()
# reset
cumul[:] = 0.
in1mask[:] = 0.
# inwstars[:]=0.
# print
# print 'maxwt = %.1f'%maxwt
# print
# -- Read in indices of best result
pkl_file = open('data.pkl', 'rb')
inds = pickle.load(pkl_file)
# print np.sum(inds),' slits in mask'
# drawspectra(xspe,yspe,x0,y0,x1,y1,pri,cx,cy,xpad,ypad,inds,swid,xobj)
use = np.reshape((inds == 1.0).nonzero(),-1)
## np.savetxt('maskout.txt',np.transpose((ra[use],dec[use],pri[use])))
# try adding some shorter slits in to identify where it might be possible to shift slits around
##cumul2 = tweakslits(xspe,yspe,x0,y0,x1,y1,pri,cx,cy,xpad,ypad,cumul)
# **** this is indices of the used flags. prob want running_index[use] as return argument
print('these are the returned indexes', use)
print('Optimizer completed...')
return use
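# Illustrative usage sketch (all values below are placeholders; ra/dec/pri must be
# arrays with one entry per catalogue object, matching the priority conventions
# described in the comments above):
#
#   used_idx = pyslit_optimize(cra=150.1, cdec=2.2, rotang=0.0, equinox=2000.0,
#                              ra=ra, dec=dec, pri=pri,
#                              slen_as=10.0, swid_as=1.5, stilt=0.0,
#                              Niter=20, opt_ypad=1.0, Nstars_req=4)
#   # `used_idx` indexes the input catalogue rows whose slits made it onto the mask.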
|
the-stack_0_23431 | import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
try:
from atomiclong import ffi
except ImportError:
ext_modules=[]
else:
ext_modules=[ffi.verifier.get_extension()]
with open('README.rst') as r:
README = r.read()
setup(
name='atomiclong',
version='0.1.1',
author='David Reid',
author_email='[email protected]',
url='https://github.com/dreid/atomiclong',
description="An AtomicLong type using CFFI.",
long_description=README,
license='MIT',
py_modules=['atomiclong'],
setup_requires=['cffi'],
install_requires=['cffi'],
tests_require=['pytest'],
ext_modules=ext_modules,
zip_safe=False,
cmdclass={"test": PyTest},
)
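# Typical developer workflow for this setup script (shown as comments; assumes a
# working C toolchain for the CFFI extension):
#   pip install -e .          # build the extension and install in editable mode
#   python setup.py test      # run the pytest suite via the PyTest command class above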
|
the-stack_0_23433 | from copy import deepcopy
from aws_cdk import (
core,
aws_lambda_event_sources as lambda_sources,
)
from multacdkrecipies.common import base_alarm, base_lambda_function, base_queue
from multacdkrecipies.recipies.utils import SQS_CONFIG_SCHEMA, validate_configuration
class AwsSqsPipes(core.Construct):
"""
AWS CDK Construct that defines a pipe where a message is sent to an SQS Queue and a Lambda function or functions
subscribed to the topic can process it and take proper actions. The construct allows to set alerts on both resources
the SQS Queue and the Lambda Functions.
"""
def __init__(self, scope: core.Construct, id: str, *, prefix: str, environment: str, configuration, **kwargs):
"""
:param scope: Stack class, used by CDK.
:param id: ID of the construct, used by CDK.
:param prefix: Prefix of the construct, used for naming purposes.
:param environment: Environment of the construct, used for naming purposes.
:param configuration: Configuration of the construct. In this case SQS_CONFIG_SCHEMA.
:param kwargs: Other parameters that could be used by the construct.
"""
super().__init__(scope, id, **kwargs)
self.prefix = prefix
self.environment_ = environment
self._configuration = configuration
# Validating that the payload passed is correct
validate_configuration(configuration_schema=SQS_CONFIG_SCHEMA, configuration_received=self._configuration)
# Defining SQS Queue
queue_data = deepcopy(self._configuration["queue"])
self._sqs_queue = base_queue(construct=self, **queue_data)
# Validating Lambda Function Runtime
functions_data = self._configuration["lambda_handlers"]
self._lambda_functions = list()
for lambda_function in functions_data:
_lambda_function = base_lambda_function(self, **lambda_function)
self._lambda_functions.append(_lambda_function)
_lambda_function.add_event_source(lambda_sources.SqsEventSource(queue=self._sqs_queue, batch_size=10))
def set_alarms(self):
"""
Function that set alarms for the resources involved in the construct.
:return: None
"""
if isinstance(self._configuration["queue"].get("alarms"), list) is True:
sqs_alarms = list()
for alarm_definition in self._configuration["queue"].get("alarms"):
sqs_alarms.append(
base_alarm(
self,
resource_name=self._configuration["queue"]["queue_name"],
base_resource=self._sqs_queue,
**alarm_definition,
)
)
for lambda_function_data, lambda_function_definition in zip(
self._configuration["lambda_handlers"], self._lambda_functions
):
if isinstance(lambda_function_data.get("alarms"), list) is True:
lambda_alarms = list()
for alarm_definition in lambda_function_data.get("alarms"):
lambda_alarms.append(
base_alarm(
self,
resource_name=lambda_function_data.get("lambda_name"),
base_resource=lambda_function_definition,
**alarm_definition,
)
)
@property
def configuration(self) -> dict:
"""
:return: Construct configuration.
"""
return self._configuration
@property
def sqs_queue(self):
"""
:return: Construct SQS Queue.
"""
return self._sqs_queue
@property
def lambda_functions(self) -> list:
"""
:return: List of Constructs Lambda Functions.
"""
return self._lambda_functions
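# Illustrative usage sketch inside a CDK stack (the exact keys are defined by
# SQS_CONFIG_SCHEMA; the dict below is a placeholder illustration, not a verified
# configuration):
#
#   pipes = AwsSqsPipes(self, "orders-pipe", prefix="acme", environment="dev",
#                       configuration={
#                           "queue": {"queue_name": "orders"},
#                           "lambda_handlers": [{"lambda_name": "orders_consumer"}],
#                       })
#   pipes.set_alarms()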
|
the-stack_0_23439 | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for evaluating results computed for a json dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import numpy as np
import os
import uuid
from pycocotools.cocoeval import COCOeval
from core.config import cfg
from utils.io import save_object
import utils.boxes as box_utils
logger = logging.getLogger(__name__)
def evaluate_masks(
json_dataset,
all_boxes,
all_segms,
output_dir,
use_salt=True,
cleanup=False
):
res_file = os.path.join(
output_dir, 'segmentations_' + json_dataset.name + '_results'
)
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
_write_coco_segms_results_file(
json_dataset, all_boxes, all_segms, res_file)
# Only do evaluation on non-test sets (annotations are undisclosed on test)
if json_dataset.name.find('test') == -1:
coco_eval = _do_segmentation_eval(json_dataset, res_file, output_dir)
else:
coco_eval = None
# Optionally cleanup results json file
if cleanup:
os.remove(res_file)
return coco_eval
def _write_coco_segms_results_file(
json_dataset, all_boxes, all_segms, res_file
):
# [{"image_id": 42,
# "category_id": 18,
# "segmentation": [...],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_boxes):
break
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_segms_results_one_category(
json_dataset, all_boxes[cls_ind], all_segms[cls_ind], cat_id))
logger.info(
'Writing segmentation results json to: {}'.format(
os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def _coco_segms_results_one_category(json_dataset, boxes, segms, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(boxes) == len(image_ids)
assert len(segms) == len(image_ids)
for i, image_id in enumerate(image_ids):
dets = boxes[i]
rles = segms[i]
if isinstance(dets, list) and len(dets) == 0:
continue
dets = dets.astype(np.float)
scores = dets[:, -1]
results.extend(
[{'image_id': image_id,
'category_id': cat_id,
'segmentation': rles[k],
'score': scores[k]}
for k in range(dets.shape[0])])
return results
def _do_segmentation_eval(json_dataset, res_file, output_dir):
coco_dt = json_dataset.COCO.loadRes(str(res_file))
coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval, output_dir=output_dir)
eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
return coco_eval
def evaluate_boxes(
json_dataset, all_boxes, output_dir, use_salt=True, cleanup=False
):
res_file = os.path.join(
output_dir, 'bbox_' + json_dataset.name + '_results'
)
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
_write_coco_bbox_results_file(json_dataset, all_boxes, res_file)
# Only do evaluation on non-test sets (annotations are undisclosed on test)
if json_dataset.name.find('test') == -1:
coco_eval = _do_detection_eval(json_dataset, res_file, output_dir)
else:
coco_eval = None
# Optionally cleanup results json file
if cleanup:
os.remove(res_file)
return coco_eval
def _write_coco_bbox_results_file(json_dataset, all_boxes, res_file):
# [{"image_id": 42,
# "category_id": 18,
# "bbox": [258.15,41.29,348.26,243.78],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_boxes):
break
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_bbox_results_one_category(
json_dataset, all_boxes[cls_ind], cat_id))
logger.info(
'Writing bbox results json to: {}'.format(os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def _coco_bbox_results_one_category(json_dataset, boxes, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(boxes) == len(image_ids)
for i, image_id in enumerate(image_ids):
dets = boxes[i]
if isinstance(dets, list) and len(dets) == 0:
continue
dets = dets.astype(np.float)
scores = dets[:, -1]
xywh_dets = box_utils.xyxy_to_xywh(dets[:, 0:4])
xs = xywh_dets[:, 0]
ys = xywh_dets[:, 1]
ws = xywh_dets[:, 2]
hs = xywh_dets[:, 3]
results.extend(
[{'image_id': image_id,
'category_id': cat_id,
'bbox': [xs[k], ys[k], ws[k], hs[k]],
'score': scores[k]} for k in range(dets.shape[0])])
return results
def _do_detection_eval(json_dataset, res_file, output_dir):
coco_dt = json_dataset.COCO.loadRes(str(res_file))
coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
_log_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.95, output_dir=output_dir)
_log_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.5, output_dir=output_dir)
_log_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.75, iou_high=0.75, output_dir=output_dir)
eval_file = os.path.join(output_dir, 'detection_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
return coco_eval
def _log_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.95, output_dir=None):
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
#IoU_lo_thresh = 0.5
#IoU_hi_thresh = 0.95
IoU_lo_thresh = iou_low
IoU_hi_thresh = iou_high
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
class_maps = {}
class_maps['IoU_low'] = IoU_lo_thresh
class_maps['IoU_high'] = IoU_hi_thresh
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
recall = coco_eval.eval['recall'][ind_lo:(ind_hi + 1), :, 0, 2]
ar_default = np.mean(recall[recall > -1])
logger.info(
'~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] ~~~~'.format(
IoU_lo_thresh, IoU_hi_thresh))
logger.info('Overall --> {:.2f},{:.2f}'.format(100 * ap_default, 100 * ar_default))
class_maps.update({'Overall' : 100*ap_default})
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][
ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
recall = coco_eval.eval['recall'][ind_lo:(ind_hi+1), cls_ind - 1, 0, 2]
ar = np.mean(recall[recall > -1])
logger.info(str(cls)+' --> {:.2f},{:.2f}'.format(100 * ap, 100 * ar))
class_maps.update({str(cls) : 100 * ap})
# save class-wise mAP
    if output_dir is not None:
        out_path = os.path.join(output_dir, 'classmAP@IoUs' + str(iou_low) + '-' + str(iou_high) + '.json')
        with open(out_path, 'w') as f:
            json.dump(class_maps, f)
logger.info('~~~~ Summary metrics ~~~~')
coco_eval.summarize()
def evaluate_box_proposals(
json_dataset, roidb, thresholds=None, area='all', limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
'all': 0,
'small': 1,
'medium': 2,
'large': 3,
'96-128': 4,
'128-256': 5,
'256-512': 6,
'512-inf': 7}
area_ranges = [
[0**2, 1e5**2], # all
[0**2, 32**2], # small
[32**2, 96**2], # medium
[96**2, 1e5**2], # large
[96**2, 128**2], # 96-128
[128**2, 256**2], # 128-256
[256**2, 512**2], # 256-512
[512**2, 1e5**2]] # 512-inf
assert area in areas, 'Unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for entry in roidb:
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
gt_boxes = entry['boxes'][gt_inds, :]
gt_areas = entry['seg_areas'][gt_inds]
valid_gt_inds = np.where(
(gt_areas >= area_range[0]) & (gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
boxes = entry['boxes'][non_gt_inds, :]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
overlaps = box_utils.bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in range(min(boxes.shape[0], gt_boxes.shape[0])):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps, 'num_pos': num_pos}
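# Hypothetical usage sketch (not part of the original file), assuming a
# COCO-style json_dataset and a roidb with proposal boxes already attached:
#
#     stats = evaluate_box_proposals(json_dataset, roidb, limit=1000, area='all')
#     print('AR@1000: {:.3f}'.format(stats['ar']))
#
# 'json_dataset' and 'roidb' are assumed to come from the surrounding framework.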
def evaluate_keypoints(
json_dataset,
all_boxes,
all_keypoints,
output_dir,
use_salt=True,
cleanup=False
):
res_file = os.path.join(
output_dir, 'keypoints_' + json_dataset.name + '_results'
)
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
_write_coco_keypoint_results_file(
json_dataset, all_boxes, all_keypoints, res_file)
# Only do evaluation on non-test sets (annotations are undisclosed on test)
if json_dataset.name.find('test') == -1:
coco_eval = _do_keypoint_eval(json_dataset, res_file, output_dir)
else:
coco_eval = None
# Optionally cleanup results json file
if cleanup:
os.remove(res_file)
return coco_eval
def _write_coco_keypoint_results_file(
json_dataset, all_boxes, all_keypoints, res_file
):
results = []
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_keypoints):
break
logger.info(
'Collecting {} results ({:d}/{:d})'.format(
cls, cls_ind, len(all_keypoints) - 1))
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_kp_results_one_category(
json_dataset, all_boxes[cls_ind], all_keypoints[cls_ind], cat_id))
logger.info(
'Writing keypoint results json to: {}'.format(
os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def _coco_kp_results_one_category(json_dataset, boxes, kps, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(kps) == len(image_ids)
assert len(boxes) == len(image_ids)
use_box_score = False
if cfg.KRCNN.KEYPOINT_CONFIDENCE == 'logit':
# This is ugly; see utils.keypoints.heatmap_to_keypoints for the magic
# indexes
score_index = 2
elif cfg.KRCNN.KEYPOINT_CONFIDENCE == 'prob':
score_index = 3
elif cfg.KRCNN.KEYPOINT_CONFIDENCE == 'bbox':
use_box_score = True
else:
raise ValueError(
'KRCNN.KEYPOINT_CONFIDENCE must be "logit", "prob", or "bbox"')
for i, image_id in enumerate(image_ids):
if len(boxes[i]) == 0:
continue
kps_dets = kps[i]
        scores = boxes[i][:, -1].astype(np.float64)
if len(kps_dets) == 0:
continue
for j in range(len(kps_dets)):
xy = []
kps_score = 0
for k in range(kps_dets[j].shape[1]):
xy.append(float(kps_dets[j][0, k]))
xy.append(float(kps_dets[j][1, k]))
xy.append(1)
if not use_box_score:
kps_score += kps_dets[j][score_index, k]
if use_box_score:
kps_score = scores[j]
else:
kps_score /= kps_dets[j].shape[1]
results.extend([{'image_id': image_id,
'category_id': cat_id,
'keypoints': xy,
'score': kps_score}])
return results
def _do_keypoint_eval(json_dataset, res_file, output_dir):
ann_type = 'keypoints'
imgIds = json_dataset.COCO.getImgIds()
imgIds.sort()
coco_dt = json_dataset.COCO.loadRes(res_file)
coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
coco_eval.params.imgIds = imgIds
coco_eval.evaluate()
coco_eval.accumulate()
eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
coco_eval.summarize()
return coco_eval
|
the-stack_0_23440 | # -*- coding: utf-8 -*-
"""MRI RF shimming.
"""
import sigpy as sp
import numpy as np
from sigpy import backend
from sigpy.mri import rf as rf
__all__ = ['calc_shims', 'init_optimal_spectral', 'init_circ_polar']
def calc_shims(shim_roi, sens, x0, dt, lamb=0, max_iter=50):
"""RF shim designer. Uses the Gerchberg Saxton algorithm.
Args:
shim_roi (array): region within volume to be shimmed. Mask of 1's and
0's. [dim_x dim_y dim_z]
sens (array): sensitivity maps. [Nc dim_x dim_y dim_z]
        x0 (array): initial guess for shim values. [Nc 1]
dt (float): hardware sampling dwell time.
lamb (float): regularization term.
max_iter (int): max number of iterations.
Returns:
Vector of complex shim weights.
"""
k1 = np.expand_dims(np.array((0, 0, 0)), 0)
A = rf.PtxSpatialExplicit(sens, coord=k1, dt=dt,
img_shape=shim_roi.shape, ret_array=False)
alg_method = sp.alg.GerchbergSaxton(A, shim_roi, x0, max_iter=max_iter,
tol=10E-9, lamb=lamb)
while not alg_method.done():
alg_method.update()
return alg_method.x
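# Hypothetical usage sketch (not part of the original module). The array shapes
# follow the docstring above; the sizes and dwell time below are made up:
#
#     import numpy as np
#     Nc, nx, ny, nz = 8, 32, 32, 1
#     sens = np.ones((Nc, nx, ny, nz), dtype=complex)   # placeholder maps
#     roi = np.ones((nx, ny, nz))                       # shim the whole volume
#     x0 = np.ones((Nc, 1), dtype=complex)
#     shims = calc_shims(roi, sens, x0, dt=4e-6, lamb=0, max_iter=20)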
def init_optimal_spectral(A, sens, preproc=False):
"""Function to return initial shim weights based on an optimal spectral
method, an eigenvector-based method.
Args:
A (linop): sigpy Linear operator.
sens (array): sensitivity maps. [Nc dim_x dim_y]
preproc (bool): option to apply preprocessing function before \
finding eigenvectors
Returns:
Vector of complex shim weights.
References:
Chandra, R., Zhong, Z., Hontz, J., McCulloch, V., Studer, C.,
Goldstein, T. (2017) 'PhasePack: A Phase Retrieval Library.'
arXiv:1711.10175.
"""
device = backend.get_device(sens)
xp = device.xp
with device:
if hasattr(A, 'repr_str') and A.repr_str == 'pTx spatial explicit':
Anum = A.linops[1].mat
else:
Anum = A
sens = sens.flatten()
n = Anum.shape[1]
Anumt = xp.transpose(Anum)
m = sens.size
y = sens ** 2
# normalize the measurements
delta = m / n
ymean = y / xp.mean(y)
# apply pre-processing function
yplus = xp.amax(y)
Y = (1 / m) * Anumt @ Anum
if preproc:
T = (yplus - 1) / (yplus + xp.sqrt(delta) - 1)
# unnormalize
T *= ymean
T = xp.transpose(xp.expand_dims(T, axis=1))
for mm in range(m):
col = Anum[mm, :]
aat = col * xp.transpose(col)
Y = Y + (1 / m) * T[mm] * aat
w, v = xp.linalg.eigh(Y)
return xp.expand_dims(v[:, 0], 1)
def init_circ_polar(sens):
"""Function to return circularly polarized initial shim weights. Provides
shim weights that set the phase to be even in the middle of the sens
profiles.
Args:
sens (array): sensitivity maps. [Nc dim_x dim_y]
Returns:
Vector of complex shim weights.
"""
dim = sens.shape[1]
device = backend.get_device(sens)
xp = device.xp
with device:
# As a rough approximation, assume that the center of sens profile is
# also the center of the object within the profile to be imaged.
        phs = xp.angle(sens[:, dim // 2, dim // 2])
phs_wt = xp.exp(-phs * 1j)
return xp.expand_dims(phs_wt, 1)
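# Hypothetical usage sketch (not part of the original module): either
# initializer returns an [Nc x 1] complex vector suitable as the x0 argument
# of calc_shims above.
#
#     x0 = init_circ_polar(sens)          # phase-aligned at the profile center
#     # or, given the explicit system matrix A used by the shim design:
#     # x0 = init_optimal_spectral(A, sens, preproc=True)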
|
the-stack_0_23441 | from copy import deepcopy
def deep_merge(source, dest):
"""Deep merges source dict into dest dict."""
for key, value in source.iteritems():
if key in dest:
if isinstance(value, dict) and isinstance(dest[key], dict):
deep_merge(value, dest[key])
continue
elif isinstance(value, list) and isinstance(dest[key], list):
for item in value:
if item not in dest[key]:
dest[key].append(item)
continue
dest[key] = value
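# Illustrative sketch (not part of the original file) of how deep_merge
# combines nested dicts and deduplicates list entries:
#
#     dest = {'a': {'x': 1}, 'tags': ['red']}
#     deep_merge({'a': {'y': 2}, 'tags': ['red', 'blue']}, dest)
#     # dest is now {'a': {'x': 1, 'y': 2}, 'tags': ['red', 'blue']}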
class ScopeBuilder(object):
"""A helper class used to build query scopes. This class is provided with a
list of scope functions (all of which return query args) which can then
be chained together using this builder to build up more complex queries."""
@classmethod
def unpack_scope(cls, scope):
"""Unpacks the response from a scope function. The function should return
either a query, a query and a projection, or a query a projection and
an query options hash."""
query = {}
projection = {}
options = {}
if isinstance(scope, tuple):
if len(scope) > 3:
raise ValueError("Invalid scope")
if len(scope) >= 1:
query = scope[0]
if len(scope) >= 2:
projection = scope[1]
if len(scope) == 3:
options = scope[2]
elif isinstance(scope, dict):
query = scope
else:
raise ValueError("Invalid scope")
return query, projection, options
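    # Illustrative sketch (not part of the original file): the accepted return
    # shapes for a scope function are
    #
    #     {'status': 'open'}                                   # query only
    #     ({'status': 'open'}, {'title': 1})                   # query + projection
    #     ({'status': 'open'}, {'title': 1}, {'limit': 10})    # query + projection + options
    #
    # anything else (e.g. a 4-tuple) raises ValueError("Invalid scope").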
@classmethod
def register_fn(cls, f):
"""Registers a scope function on this builder."""
def inner(self, *args, **kwargs):
try:
query, projection, options = cls.unpack_scope(f(*args, **kwargs))
new_query = deepcopy(self.query)
new_projection = deepcopy(self.projection)
new_options = deepcopy(self.options)
deep_merge(query, new_query)
new_projection.update(projection)
new_options.update(options)
return ScopeBuilder(self.model, self.fns, new_query,
new_projection, new_options)
except ValueError:
raise ValueError("Scope function \"{}\ returns an invalid scope".format(f.__name__))
setattr(cls, f.__name__, inner)
def __init__(self, model, fns, query={}, projection={}, options={}):
self.fns = fns
self.model = model
self.query = query
self.projection = projection
self.options = options
self._active_cursor = None
for fn in fns:
self.register_fn(fn)
@property
def cursor(self):
"""
Returns a cursor for the currently assembled query, creating it if
it doesn't already exist.
"""
if not self._active_cursor:
self._active_cursor = self.model.find(self.query,
self.projection or None,
**self.options)
return self._active_cursor
def __getitem__(self, index):
return self.cursor[index]
def __iter__(self):
return self.cursor.__iter__()
def __getattr__(self, key):
# If the method is not one of ours, attempt to find it on the cursor
# which will mean executing it.
if hasattr(self.cursor, key):
return getattr(self.cursor, key)
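# Hypothetical usage sketch (not part of the original file); 'Order' and the
# scope functions below are made-up names for a model exposing find():
#
#     def open_orders():
#         return {'status': 'open'}
#
#     def recent():
#         return {'created_at': {'$gt': some_date}}, {}, {'limit': 10}
#
#     builder = ScopeBuilder(Order, [open_orders, recent])
#     for doc in builder.open_orders().recent():
#         print(doc)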
|
the-stack_0_23443 | from __future__ import unicode_literals
import re
import binascii
try:
from Crypto.Cipher import AES
can_decrypt_frag = True
except ImportError:
can_decrypt_frag = False
from .fragment import FragmentFD
from .external import FFmpegFD
from ..compat import (
compat_urllib_error,
compat_urlparse,
compat_struct_pack,
)
from ..utils import (
parse_m3u8_attributes,
update_url_query,
)
class HlsFD(FragmentFD):
""" A limited implementation that does not require ffmpeg """
FD_NAME = 'hlsnative'
@staticmethod
def can_download(manifest, info_dict):
UNSUPPORTED_FEATURES = (
r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]
# r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
# Live streams heuristic does not always work (e.g. geo restricted to Germany
# http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)
# r'#EXT-X-MEDIA-SEQUENCE:(?!0$)', # live streams [3]
# This heuristic also is not correct since segments may not be appended as well.
# Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite
# no segments will definitely be appended to the end of the playlist.
# r'#EXT-X-PLAYLIST-TYPE:EVENT', # media segments may be appended to the end of
# # event media playlists [4]
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
# 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
)
check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
check_results.append(can_decrypt_frag or not is_aes128_enc)
check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))
check_results.append(not info_dict.get('is_live'))
return all(check_results)
def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
man_url = urlh.geturl()
s = urlh.read().decode('utf-8', 'ignore')
if not self.can_download(s, info_dict):
if info_dict.get('extra_param_to_segment_url'):
self.report_error('pycrypto not found. Please install it.')
return False
self.report_warning(
'hlsnative has detected features it does not support, '
'extraction will be delegated to ffmpeg')
fd = FFmpegFD(self.ydl, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
return fd.real_download(filename, info_dict)
def anvato_ad(s):
return s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
media_frags = 0
ad_frags = 0
ad_frag_next = False
for line in s.splitlines():
line = line.strip()
if not line:
continue
if line.startswith('#'):
if anvato_ad(line):
ad_frags += 1
ad_frag_next = True
continue
if ad_frag_next:
ad_frag_next = False
continue
media_frags += 1
ctx = {
'filename': filename,
'total_frags': media_frags,
'ad_frags': ad_frags,
}
self._prepare_and_start_frag_download(ctx)
fragment_retries = self.params.get('fragment_retries', 0)
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
test = self.params.get('test', False)
extra_query = None
extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
if extra_param_to_segment_url:
extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
i = 0
media_sequence = 0
decrypt_info = {'METHOD': 'NONE'}
byte_range = {}
frag_index = 0
ad_frag_next = False
for line in s.splitlines():
line = line.strip()
if line:
if not line.startswith('#'):
if ad_frag_next:
ad_frag_next = False
continue
frag_index += 1
if frag_index <= ctx['fragment_index']:
continue
frag_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(man_url, line))
if extra_query:
frag_url = update_url_query(frag_url, extra_query)
count = 0
headers = info_dict.get('http_headers', {})
if byte_range:
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'])
while count <= fragment_retries:
try:
success, frag_content = self._download_fragment(
ctx, frag_url, info_dict, headers)
if not success:
return False
break
except compat_urllib_error.HTTPError as err:
# Unavailable (possibly temporary) fragments may be served.
# First we try to retry then either skip or abort.
# See https://github.com/rg3/youtube-dl/issues/10165,
# https://github.com/rg3/youtube-dl/issues/10448).
count += 1
if count <= fragment_retries:
self.report_retry_fragment(err, frag_index, count, fragment_retries)
if count > fragment_retries:
if skip_unavailable_fragments:
i += 1
media_sequence += 1
self.report_skip_fragment(frag_index)
continue
self.report_error(
'giving up after %s fragment retries' % fragment_retries)
return False
if decrypt_info['METHOD'] == 'AES-128':
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(decrypt_info['URI']).read()
frag_content = AES.new(
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
self._append_fragment(ctx, frag_content)
# We only download the first fragment during the test
if test:
break
i += 1
media_sequence += 1
elif line.startswith('#EXT-X-KEY'):
decrypt_url = decrypt_info.get('URI')
decrypt_info = parse_m3u8_attributes(line[11:])
if decrypt_info['METHOD'] == 'AES-128':
if 'IV' in decrypt_info:
decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
if not re.match(r'^https?://', decrypt_info['URI']):
decrypt_info['URI'] = compat_urlparse.urljoin(
man_url, decrypt_info['URI'])
if extra_query:
decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
if decrypt_url != decrypt_info['URI']:
decrypt_info['KEY'] = None
elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
media_sequence = int(line[22:])
elif line.startswith('#EXT-X-BYTERANGE'):
splitted_byte_range = line[17:].split('@')
sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
byte_range = {
'start': sub_range_start,
'end': sub_range_start + int(splitted_byte_range[0]),
}
elif anvato_ad(line):
ad_frag_next = True
self._finish_frag_download(ctx)
return True
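    # Note (added sketch, not part of the original file): when an AES-128 key
    # carries no explicit IV attribute, the code above derives it from the
    # media sequence number per the HLS spec, e.g.
    #
    #     compat_struct_pack('>8xq', 7) == b'\x00' * 8 + b'\x00\x00\x00\x00\x00\x00\x00\x07'
    #
    # i.e. a 16-byte big-endian integer equal to the segment's sequence number.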
|
the-stack_0_23445 | import os
import yaml
import githubapimock as api
config_path = os.path.expanduser("~/repo.yml")
with open(config_path) as config_file:
    settings = yaml.safe_load(config_file)
org = settings["org"]
repo = settings["repo"]
user = settings["user"]
token = settings["token"]
title = ""
while title != "q":
title = input("Title: ")
body = input("Body: ")
label = input("Label: ")
labels = [label]
issue_id = api.create_issue(org, repo, user, token, title, body)
print("Created issue " + str(issue_id) + " in " + org + "/" +repo)
|
the-stack_0_23447 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestRepair(AccountingTestCase):
def setUp(self):
super(TestRepair, self).setUp()
self.Repair = self.env['repair.order']
self.ResUsers = self.env['res.users']
self.RepairMakeInvoice = self.env['repair.order.make_invoice']
self.res_group_user = self.env.ref('stock.group_stock_user')
self.res_group_manager = self.env.ref('stock.group_stock_manager')
self.repair_r0 = self.env.ref('repair.repair_r0')
self.repair_r1 = self.env.ref('repair.repair_r1')
self.repair_r2 = self.env.ref('repair.repair_r2')
self.res_repair_user = self.ResUsers.create({
'name': 'Repair User',
'login': 'maru',
'email': '[email protected]',
'groups_id': [(6, 0, [self.res_group_user.id])]})
self.res_repair_manager = self.ResUsers.create({
'name': 'Repair Manager',
'login': 'marm',
'email': '[email protected]',
'groups_id': [(6, 0, [self.res_group_manager.id])]})
def _create_simple_repair_order(self, invoice_method):
product_to_repair = self.env.ref('product.product_product_5')
partner = self.env.ref('base.res_partner_address_1')
return self.env['repair.order'].create({
'product_id': product_to_repair.id,
'product_uom': product_to_repair.uom_id.id,
'address_id': partner.id,
'guarantee_limit': datetime.today().strftime('%Y-%m-%d'),
'invoice_method': invoice_method,
'partner_invoice_id': partner.id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'partner_id': self.env.ref('base.res_partner_12').id
})
def _create_simple_operation(self, repair_id=False, qty=0.0, price_unit=0.0):
product_to_add = self.env.ref('product.product_product_5')
return self.env['repair.line'].create({
'name': 'Add The product',
'type': 'add',
'product_id': product_to_add.id,
'product_uom_qty': qty,
'product_uom': product_to_add.uom_id.id,
'price_unit': price_unit,
'repair_id': repair_id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': product_to_add.property_stock_production.id,
})
def _create_simple_fee(self, repair_id=False, qty=0.0, price_unit=0.0):
product_service = self.env.ref('product.product_product_2')
return self.env['repair.fee'].create({
'name': 'PC Assemble + Custom (PC on Demand)',
'product_id': product_service.id,
'product_uom_qty': qty,
'product_uom': product_service.uom_id.id,
'price_unit': price_unit,
'repair_id': repair_id,
})
def test_00_repair_afterinv(self):
repair = self._create_simple_repair_order('after_repair')
self._create_simple_operation(repair_id=repair.id, qty=1.0, price_unit=50.0)
# I confirm Repair order taking Invoice Method 'After Repair'.
repair.with_user(self.res_repair_user).action_repair_confirm()
# I check the state is in "Confirmed".
self.assertEqual(repair.state, "confirmed", 'Repair order should be in "Confirmed" state.')
repair.action_repair_start()
# I check the state is in "Under Repair".
self.assertEqual(repair.state, "under_repair", 'Repair order should be in "Under_repair" state.')
# Repairing process for product is in Done state and I end Repair process by clicking on "End Repair" button.
repair.action_repair_end()
        # I define the Invoice Method 'After Repair' option in this Repair order, so I create the invoice by clicking on the "Make Invoice" wizard.
make_invoice = self.RepairMakeInvoice.create({
'group': True})
# I click on "Create Invoice" button of this wizard to make invoice.
context = {
"active_model": 'repair_order',
"active_ids": [repair.id],
"active_id": repair.id
}
make_invoice.with_context(context).make_invoices()
# I check that invoice is created for this Repair order.
self.assertEqual(len(repair.invoice_id), 1, "No invoice exists for this repair order")
self.assertEqual(len(repair.move_id.move_line_ids[0].consume_line_ids), 1, "Consume lines should be set")
def test_01_repair_b4inv(self):
repair = self._create_simple_repair_order('b4repair')
# I confirm Repair order for Invoice Method 'Before Repair'.
repair.with_user(self.res_repair_user).action_repair_confirm()
# I click on "Create Invoice" button of this wizard to make invoice.
repair.action_repair_invoice_create()
# I check that invoice is created for this Repair order.
self.assertEqual(len(repair.invoice_id), 1, "No invoice exists for this repair order")
def test_02_repair_noneinv(self):
repair = self._create_simple_repair_order('none')
# Add a new fee line
self._create_simple_fee(repair_id=repair.id, qty=1.0, price_unit=12.0)
self.assertEqual(repair.amount_total, 12, "Amount_total should be 12")
# Add new operation line
self._create_simple_operation(repair_id=repair.id, qty=1.0, price_unit=14.0)
self.assertEqual(repair.amount_total, 26, "Amount_total should be 26")
# I confirm Repair order for Invoice Method 'No Invoice'.
repair.with_user(self.res_repair_user).action_repair_confirm()
# I start the repairing process by clicking on "Start Repair" button for Invoice Method 'No Invoice'.
repair.action_repair_start()
# I check its state which is in "Under Repair".
self.assertEqual(repair.state, "under_repair", 'Repair order should be in "Under_repair" state.')
# Repairing process for product is in Done state and I end this process by clicking on "End Repair" button.
repair.action_repair_end()
self.assertEqual(repair.move_id.location_id.id, self.env.ref('stock.stock_location_stock').id,
'Repaired product was taken in the wrong location')
self.assertEqual(repair.move_id.location_dest_id.id, self.env.ref('stock.stock_location_stock').id,
'Repaired product went to the wrong location')
self.assertEqual(repair.operations.move_id.location_id.id, self.env.ref('stock.stock_location_stock').id,
'Consumed product was taken in the wrong location')
self.assertEqual(repair.operations.move_id.location_dest_id.id, self.env.ref('product.product_product_5').property_stock_production.id,
'Consumed product went to the wrong location')
# I define Invoice Method 'No Invoice' option in this repair order.
# So, I check that Invoice has not been created for this repair order.
self.assertNotEqual(len(repair.invoice_id), 1, "Invoice should not exist for this repair order")
|
the-stack_0_23449 | from __future__ import absolute_import, division, print_function
import sys
from py._code.code import FormattedExcinfo
import py
import warnings
import inspect
import _pytest
from _pytest._code.code import TerminalRepr
from _pytest.compat import (
NOTSET, exc_clear, _format_args,
getfslineno, get_real_func,
is_generator, isclass, getimfunc,
getlocation, getfuncargnames,
safe_getattr,
)
from _pytest.runner import fail
from _pytest.compat import FuncargnamesCompatAttr
def pytest_sessionstart(session):
import _pytest.python
scopename2class.update({
'class': _pytest.python.Class,
'module': _pytest.python.Module,
'function': _pytest.main.Item,
})
session._fixturemanager = FixtureManager(session)
scopename2class = {}
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() is in random order of argnames, but
        # then again different functions (items) can change the order of
        # arguments so it doesn't matter much probably
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
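# Illustrative sketch (not part of the original file): for an item parametrized
# with argname 'db' at param_index 0, the yielded key grows with the scope:
#
#     session scope -> ('db', 0)
#     module scope  -> ('db', 0, item.fspath)
#     class scope   -> ('db', 0, item.fspath, item.cls)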
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
            items_before, ignore, argkeys_cache, scopenum + 1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
# items list into a list of items_nomatch, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_values = {} # argname -> fixture value
self._fixture_defs = {} # argname -> FixtureDef
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
# backward incompatible note: now a readonly property
return list(self._pyfuncitem._fixtureinfo.names_closure)
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
# not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
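    # Illustrative sketch (not part of the original file): a fixture that
    # decides at setup time which other fixture to pull in via getfixturevalue():
    #
    #     @pytest.fixture
    #     def backend(request):
    #         name = request.config.getoption("--backend", default="sqlite")
    #         return request.getfixturevalue(name + "_backend")
    #
    # '--backend', 'sqlite_backend', etc. are made-up names for illustration.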
def getfuncargvalue(self, argname):
""" Deprecated, use getfixturevalue. """
from _pytest import deprecated
warnings.warn(
deprecated.GETFUNCARGVALUE,
DeprecationWarning)
return self.getfixturevalue(argname)
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfixturevalue(fixturedef)
self._fixture_values[argname] = result
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfixturevalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
if fixturedef.params is not None:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = frameinfo.filename
source_lineno = frameinfo.lineno
source_path = py.path.local(source_path)
if source_path.relto(funcitem.config.rootdir):
source_path = source_path.relto(funcitem.config.rootdir)
msg = (
"The requested fixture has no parameter defined for the "
"current test.\n\nRequested fixture '{0}' defined in:\n{1}"
"\n\nRequested here:\n{2}:{3}".format(
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path,
source_lineno,
)
)
fail(msg)
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
# if its not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" % (
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._fixture_values = request._fixture_values
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
raise ValueError(
"{0} {1}has an unsupported scope value '{2}'".format(
descr, 'from {0} '.format(where) if where else '',
scope)
)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
# the last fixture raise an error, let's present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist and name not in available:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(sorted(available)),)
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
lines[0].strip()), red=True)
for line in lines[1:]:
tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
line.strip()), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
def teardown():
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr='fixture {0}'.format(func.__name__),
where=baseid
)
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
ihook = self._fixturemanager.session.ihook
ihook.pytest_fixture_post_finalizer(fixturedef=self)
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
ihook = self._fixturemanager.session.ihook
return ihook.pytest_fixture_setup(fixturedef=self, request=request)
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
my_cache_key = request.param_index
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except Exception:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
class FixtureFunctionMarker:
def __init__(self, scope, params, autouse=False, ids=None, name=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.ids = ids
self.name = name
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define a
fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module" or "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
Fixtures can optionally provide their values to test functions using a ``yield`` statement,
instead of ``return``. In this case, the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome. A fixture function must yield exactly once.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, name=name)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
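# Illustrative usage sketch (not part of the original file), showing the
# decorator forms documented above; 'smtp_server', 'connect' and the host
# names are made up:
#
#     @pytest.fixture(scope="module", params=["smtp.a.test", "smtp.b.test"],
#                     ids=["a", "b"])
#     def smtp_server(request):
#         server = connect(request.param)   # hypothetical helper
#         yield server                      # teardown runs after the yield
#         server.close()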
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
if callable(scope) and params is None and not autouse:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, ids=ids, name=name)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
i. e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
from _pytest import deprecated
self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
if marker.name:
name = marker.name
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
'and be decorated with @pytest.fixture:\n%s' % name
assert not name.startswith(self._argprefix), msg
fixture_def = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
|
the-stack_0_23453 | # coding: UTF-8
from urlparse import urlparse
import urllib, urllib2, httplib, requests
def my_urlparse(url):
try:
parsed = urlparse(url)
if parsed.port:
return 'scheme=%s, host=%s, port=%d' % (parsed.scheme, parsed.netloc, parsed.port)
else:
return 'scheme=%s, host=%s, port=' % (parsed.scheme, parsed.netloc)
except ValueError:
return 'err'
def my_httplib(url):
try:
conn = httplib.HTTPConnection(urlparse(url).netloc)
conn.request("GET", urlparse(url).path)
data = conn.getresponse().read().strip()
conn.close()
except Exception:
data = 'err'
return data
def my_urllib(url):
try:
return urllib.urlopen(url).read().strip()
except Exception:
return 'err'
def my_urllib2(url):
try:
return urllib2.urlopen(url).read().strip()
except Exception:
return 'err'
def my_requests(url):
try:
return requests.get(url).content.strip()
except Exception:
return 'err'
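# Minimal usage sketch (example.com is a placeholder URL): each helper above
# returns the stripped response body, or the string 'err' on any failure, so
# the different HTTP client libraries can be compared side by side.
if __name__ == '__main__':
    url = 'http://example.com/'
    print(my_urlparse(url))  # -> scheme=http, host=example.com, port=
    print(my_requests(url))  # -> response body, or 'err' if the request fails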
|
the-stack_0_23455 | """Python class for a distant object with, at most, proper motion."""
from numpy import array, cos, outer, sin
from .constants import AU_KM, ASEC2RAD, C, C_AUDAY, DAY_S, T0
from .functions import length_of
from .relativity import light_time_difference
from .timelib import Time
from .units import Angle
class Star(object):
"""The position in the sky of a star or other fixed object.
Each `Star` object specifies the position of a distant object. You
should provide as a right ascension and declination relative to the
ICRS (the recent improvement upon J2000). You can specify the
coordinates using either floating point hours and degrees, or tuples
that specify hour and degree fractions as minutes and seconds, or
even full Skyfield :class:`~skyfield.units.Angle` objects (which can
themselves be initialized using hours, degrees, or radians):
>>> barnard = Star(ra_hours=17.963471675, dec_degrees=4.69339088889)
>>> barnard = Star(ra_hours=(17, 57, 48.49), dec_degrees=(4, 41, 36.20))
>>> barnard = Star(ra=Angle(hours=17.963471675),
... dec=Angle(degrees=4.69339088889))
For objects whose proper motion across the sky has been detected,
you can supply velocities in milliarcseconds (mas) per year, and
even a parallax and radial velocity if those are known:
>>> barnard = Star(ra_hours=(17, 57, 48.49803),
... dec_degrees=(4, 41, 36.2072),
... ra_mas_per_year=-798.71,
... dec_mas_per_year=+10337.77,
... parallax_mas=545.4,
... radial_km_per_s=-110.6)
See `stars` for a guide to using a `Star` once you have created it.
"""
au_km = AU_KM
def __init__(self, ra=None, dec=None, ra_hours=None, dec_degrees=None,
ra_mas_per_year=0.0, dec_mas_per_year=0.0,
parallax_mas=0.0, radial_km_per_s=0.0, names=(), epoch=T0):
if ra_hours is not None:
self.ra = Angle(hours=ra_hours)
elif isinstance(ra, Angle):
self.ra = ra
else:
raise TypeError('please provide either ra_hours=<float> or else'
' ra=<skyfield.units.Angle object>')
if dec_degrees is not None:
self.dec = Angle(degrees=dec_degrees)
elif isinstance(dec, Angle):
self.dec = dec
else:
raise TypeError('please provide either dec_degrees=<float> or else'
' dec=<skyfield.units.Angle object>')
if isinstance(epoch, Time):
epoch = epoch.tdb
elif isinstance(epoch, float):
pass
else:
raise ValueError('the epoch= must be a Time object, or'
' a floating point Barycentric Dynamical Time (TDB)')
self.ra_mas_per_year = ra_mas_per_year
self.dec_mas_per_year = dec_mas_per_year
self.parallax_mas = parallax_mas
self.radial_km_per_s = radial_km_per_s
self.epoch = epoch
self.names = names
self._compute_vectors()
def __repr__(self):
opts = []
for name in ['ra_mas_per_year', 'dec_mas_per_year',
'parallax_mas', 'radial_km_per_s', 'names', 'epoch']:
value = getattr(self, name)
if value:
opts.append(', {0}={1!r}'.format(name, value))
return 'Star(ra_hours={0!r}, dec_degrees={1!r}{2})'.format(
self.ra.hours, self.dec.degrees, ''.join(opts))
def _observe_from_bcrs(self, observer):
position, velocity = self._position_au, self._velocity_au_per_d
t = observer.t
dt = light_time_difference(position, observer.position.au)
if t.shape:
position = (outer(velocity, t.tdb + dt - self.epoch).T + position).T
else:
position = position + velocity * (t.tdb + dt - self.epoch)
vector = position - observer.position.au
distance = length_of(vector)
light_time = distance / C_AUDAY
return vector, (observer.velocity.au_per_d.T - velocity).T, light_time
def _compute_vectors(self):
"""Compute the star's position as an ICRF position and velocity."""
# Use 1 gigaparsec for stars whose parallax is zero.
parallax = self.parallax_mas
if parallax <= 0.0:
parallax = 1.0e-6
# Convert right ascension, declination, and parallax to position
# vector in equatorial system with units of au.
dist = 1.0 / sin(parallax * 1.0e-3 * ASEC2RAD)
r = self.ra.radians
d = self.dec.radians
cra = cos(r)
sra = sin(r)
cdc = cos(d)
sdc = sin(d)
self._position_au = array((
dist * cdc * cra,
dist * cdc * sra,
dist * sdc,
))
# Compute Doppler factor, which accounts for change in light
# travel time to star.
k = 1.0 / (1.0 - self.radial_km_per_s / C * 1000.0)
# Convert proper motion and radial velocity to orthogonal
# components of motion with units of au/day.
pmr = self.ra_mas_per_year / (parallax * 365.25) * k
pmd = self.dec_mas_per_year / (parallax * 365.25) * k
rvl = self.radial_km_per_s * DAY_S / self.au_km * k
# Transform motion vector to equatorial system.
self._velocity_au_per_d = array((
- pmr * sra - pmd * sdc * cra + rvl * cdc * cra,
pmr * cra - pmd * sdc * sra + rvl * cdc * sra,
pmd * cdc + rvl * sdc,
))
|
the-stack_0_23456 | # Implement int sqrt(int x).
# Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
# Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
# Example 1:
# Input: 4
# Output: 2
# Example 2:
# Input: 8
# Output: 2
# Explanation: The square root of 8 is 2.82842..., and since
# the decimal part is truncated, 2 is returned.
class Solution:
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
start = 0
end = x
if x == 1:
return 1
while start < end - 1:
while (int((end - start) / 2) + start)**2 > x and start < end - 1:
end = int((end - start) / 2) + start
start = int((end - start) / 2) + start
return start |
the-stack_0_23460 | #!/bin/python
import os
import sys
compiler_name = "g++"
options = ""
output_name = "mash"
libs = []
lib_paths = []
includes = []
excludes = {}
if os.name == 'nt':
excludes["io-linux.cpp"] = True
includes.extend((
"C:\\Users\\Jack\\source\\freetype-2.10.2\\include",
"C:\\Users\\Jack\\source\\glfw-3.3.4.bin.WIN64\\include",
"C:\\VulkanSDK\\1.2.135.0\\Include"
))
libs.extend(
("freetype", "glfw3", "vulkan-1", "kernel32", "user32", "shell32", "gdi32", "vcruntime", "msvcrt", "msvcprt", "ucrt")
)
lib_paths.extend((
"C:\\Users\\Jack\\source\\freetype-2.10.2\\win64",
"C:\\Users\\Jack\\source\\glfw-3.3.4.bin.WIN64\\lib-vc2019",
"C:\\VulkanSDK\\1.2.135.0\\Lib"
))
output_name = "mash.exe"
compiler_name = "clang"
options += "-Xlinker /NODEFAULTLIB -Xlinker /SUBSYSTEM:windows -Xlinker /ENTRY:mainCRTStartup"
else:
excludes["io-windows.cpp"] = True
includes.append("/usr/include/freetype2")
libs.extend(("freetype", "glfw", "vulkan"))
cpp_list = []
for l in os.listdir("."):
if os.path.isfile(l) and l[-4:] == ".cpp" and l not in excludes:
cpp_list.append(l)
include_string = ""
for i in includes:
include_string += "-I" + i + " "
libs_string = ""
for l in libs:
libs_string += "-l" + l + " "
lib_paths_string = ""
for l in lib_paths:
lib_paths_string += "-L" + l + " "
os.system("{0} {1} -std=c++17 {2} {3} {4} {5} -o {6}".format(compiler_name, options, include_string, lib_paths_string, libs_string, " ".join(cpp_list), output_name))
|
the-stack_0_23462 | import braintree
from braintree.error_result import ErrorResult
from braintree.successful_result import SuccessfulResult
from braintree.exceptions.not_found_error import NotFoundError
from braintree.oauth_credentials import OAuthCredentials
from braintree.util import Crypto
import sys
if sys.version_info[0] == 2:
from urllib import quote_plus
else:
from urllib.parse import quote_plus
from functools import reduce
class OAuthGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def create_token_from_code(self, params):
params["grant_type"] = "authorization_code"
return self._create_token(params)
def create_token_from_refresh_token(self, params):
params["grant_type"] = "refresh_token"
return self._create_token(params)
def revoke_access_token(self, access_token):
self.config.assert_has_client_credentials()
response = self.config.http().post("/oauth/revoke_access_token", {
"token": access_token
})
if "result" in response and response["result"]["success"]:
return SuccessfulResult
else:
return ErrorResult(self.gateway, "could not revoke access token")
def _create_token(self, params):
self.config.assert_has_client_credentials()
response = self.config.http().post("/oauth/access_tokens", {
"credentials": params
})
if "credentials" in response:
return SuccessfulResult({"credentials": OAuthCredentials(self.gateway, response["credentials"])})
else:
return ErrorResult(self.gateway, response["api_error_response"])
def connect_url(self, params):
params["client_id"] = self.config.client_id
user_params = self._sub_query(params, "user")
business_params = self._sub_query(params, "business")
def clean_values(accumulator, kv_pair):
key, value = kv_pair
if isinstance(value, list):
accumulator += [(key + "[]", v) for v in value]
else:
accumulator += [(key, value)]
return accumulator
params = reduce(clean_values, params.items(), [])
query = params + user_params + business_params
query_string = "&".join(quote_plus(key) + "=" + quote_plus(value) for key, value in query)
url = self.config.environment.base_url + "/oauth/connect?" + query_string
signature = self._compute_signature(url)
return url + "&signature=" + signature + "&algorithm=SHA256"
def _sub_query(self, params, root):
if root in params:
sub_query = params.pop(root)
else:
sub_query = {}
query = [(root + "[" + key + "]", str(value)) for key, value in sub_query.items()]
return query
def _compute_signature(self, url):
return Crypto.sha256_hmac_hash(self.config.client_secret, url)
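# Usage sketch (parameter values are placeholders): a gateway configured with
# OAuth client credentials typically exposes this class as `gateway.oauth`, so
# a partner redirect URL would be built along the lines of
#   gateway.oauth.connect_url({"redirect_uri": "https://example.com/cb",
#                              "scope": "read_write"})
# which appends the signature produced by _compute_signature above.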
|
the-stack_0_23463 | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.lang import Builder
Builder.load_string('''
<FxDialog@Popup>
id: popup
title: 'Fiat Currency'
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Currency')
height: '48dp'
Spinner:
height: '48dp'
id: ccy
on_text: popup.on_currency(self.text)
Widget:
size_hint: 1, 0.05
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('History rates')
CheckBox:
id:hist
active: popup.has_history_rates
on_active: popup.on_checkbox_history(self.active)
Widget:
size_hint: 1, 0.05
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Source')
height: '48dp'
Spinner:
height: '48dp'
id: exchanges
on_text: popup.on_exchange(self.text)
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback()
popup.dismiss()
''')
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.widget import Widget
from kivy.clock import Clock
from electrum_redd.gui.kivy.i18n import _
from functools import partial
class FxDialog(Factory.Popup):
def __init__(self, app, plugins, config, callback):
self.app = app
self.config = config
self.callback = callback
self.fx = self.app.fx
if self.fx.get_history_config(allow_none=True) is None:
# If nothing is set, force-enable it. (Note that as fiat rates itself
# are disabled by default, it is enough to set this here. If they
# were enabled by default, this would be too late.)
self.fx.set_history_config(True)
self.has_history_rates = self.fx.get_history_config()
Factory.Popup.__init__(self)
self.add_currencies()
def add_exchanges(self):
ex = self.ids.exchanges
if self.fx.is_enabled():
exchanges = sorted(self.fx.get_exchanges_by_ccy(self.fx.get_currency(), self.has_history_rates))
mx = self.fx.exchange.name()
if mx in exchanges:
ex.text = mx
elif exchanges:
ex.text = exchanges[0]
else:
ex.text = ''
else:
exchanges = []
ex.text = ''
ex.values = exchanges
def on_exchange(self, text):
if not text:
return
if self.fx.is_enabled() and text != self.fx.exchange.name():
self.fx.set_exchange(text)
def add_currencies(self):
currencies = [_('None')] + self.fx.get_currencies(self.has_history_rates)
my_ccy = self.fx.get_currency() if self.fx.is_enabled() else _('None')
self.ids.ccy.values = currencies
self.ids.ccy.text = my_ccy
def on_checkbox_history(self, checked):
self.fx.set_history_config(checked)
self.has_history_rates = checked
self.add_currencies()
self.on_currency(self.ids.ccy.text)
def on_currency(self, ccy):
b = (ccy != _('None'))
self.fx.set_enabled(b)
if b:
if ccy != self.fx.get_currency():
self.fx.set_currency(ccy)
self.app.fiat_unit = ccy
else:
self.app.is_fiat = False
Clock.schedule_once(lambda dt: self.add_exchanges())
|
the-stack_0_23464 | def cleanup_social_account(backend, uid, user=None, *args, **kwargs):
"""
3rd party: python-social-auth.
Social auth pipeline to cleanup the user's data. Must be placed after 'social_core.pipeline.user.create_user'.
"""
if user and kwargs.get('is_new', False):
user.first_name = kwargs['details']['first_name']
user.last_name = kwargs['details']['last_name']
user.save()
return {'user': user}
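# Usage sketch (the 'myapp.pipeline' module path is assumed): register the step
# in settings.py right after user creation, as the docstring requires, e.g.
#   SOCIAL_AUTH_PIPELINE = (
#       ...,
#       'social_core.pipeline.user.create_user',
#       'myapp.pipeline.cleanup_social_account',
#       ...,
#   )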
|
the-stack_0_23465 | import FWCore.ParameterSet.Config as cms
process = cms.Process("HLTSMPOfflineAnalysis")
process.load("HLTriggerOffline.SMP.SMPValidation_cff")
process.load("DQMServices.Components.MEtoEDMConverter_cfi")
hltProcessName = "HLT"
process.hltSMPValidator.hltProcessName = hltProcessName
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = cms.string(autoCond['startup'])
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_7_3_0_pre1/RelValH130GGgluonfusion_13/GEN-SIM-RECO/PRE_LS172_V15-v1/00000/A8F284E4-FC59-E411-8934-0025905A48D0.root',
'/store/relval/CMSSW_7_3_0_pre1/RelValH130GGgluonfusion_13/GEN-SIM-RECO/PRE_LS172_V15-v1/00000/F2BA47E7-FC59-E411-9031-0025905964B4.root'
),
secondaryFileNames = cms.untracked.vstring(
)
)
process.DQMStore = cms.Service("DQMStore")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 2000
process.MessageLogger.destinations += ['SMPValidationMessages']
process.MessageLogger.categories += ['SMPValidation']
process.MessageLogger.debugModules += ['*']#HLTHiggsValidator','HLTHiggsSubAnalysis','HLTHiggsPlotter']
process.MessageLogger.SMPValidationMessages = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG'),
default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
SMPValidation = cms.untracked.PSet(limit = cms.untracked.int32(1000))
)
process.out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring(
'drop *',
'keep *_MEtoEDMConverter_*_HLTSMPOfflineAnalysis'),
fileName = cms.untracked.string('hltSMPValidator.root')
)
process.analyzerpath = cms.Path(
process.hltSMPValidator *
process.MEtoEDMConverter
)
process.outpath = cms.EndPath(process.out)
|
the-stack_0_23466 | from polyphony import testbench
class C:
def __init__(self, x):
self.x = x
class D:
def __init__(self, c):
self.c = c
def alias02(x):
c0 = C(x)
c1 = C(x*x)
d = D(c0)
result0 = d.c.x == x
d.c = c1
result1 = d.c.x == x*x
c1.x = 0
result2 = d.c.x == 0
d.c = c0
result3 = d.c.x == x
return result0 and result1 and result2 and result3
@testbench
def test():
assert alias02(1)
assert alias02(2)
assert alias02(3)
test()
|
the-stack_0_23467 | from django.conf.urls import url
from django.shortcuts import redirect
from django.urls import reverse
from djangocms_moderation import admin as moderation_admin
from djangocms_moderation.models import ModerationCollection, ModerationRequest
from djangocms_versioning import admin
from djangocms_content_expiry.constants import CONTENT_EXPIRY_EXPIRE_FIELD_LABEL
from djangocms_content_expiry.models import ContentExpiry
def expires(self, obj):
version = ContentExpiry.objects.filter(version=obj.pk)
if version:
return version[0].expires
return ""
expires.short_description = CONTENT_EXPIRY_EXPIRE_FIELD_LABEL
admin.VersionAdmin.expire = expires
def get_list_display(func):
"""
Register the expires field with the Versioning Admin
"""
def inner(self, request):
list_display = func(self, request)
created_by_index = list_display.index('created_by')
return list_display[:created_by_index] + ('expire',) + list_display[created_by_index:]
return inner
admin.VersionAdmin.get_list_display = get_list_display(admin.VersionAdmin.get_list_display)
def _get_urls(func):
"""
    Add the custom content expiry copy URL to the moderation admin urls
"""
def inner(self, *args, **kwargs):
url_list = func(self, *args, **kwargs)
info = self.model._meta.app_label, self.model._meta.model_name
url_list.insert(0, url(
r'^copy/',
self.admin_site.admin_view(self.copy_content_expiry_view),
name="{}_{}_copy".format(*info),
))
return url_list
return inner
moderation_admin.ModerationRequestTreeAdmin.get_urls = _get_urls(moderation_admin.ModerationRequestTreeAdmin.get_urls)
def copy_content_expiry_view(self, request):
collection_id = request.GET.getlist("collection__id")
collection_id = int(collection_id[0])
moderation_request_id = request.GET.getlist("moderation_request__id")
moderation_request_id = int(moderation_request_id[0])
if collection_id and moderation_request_id:
collection = ModerationCollection.objects.get(id=collection_id)
moderation_request = ModerationRequest.objects.get(id=moderation_request_id)
version = moderation_request.version
redirect_url = reverse('admin:djangocms_moderation_moderationrequesttreenode_changelist')
redirect_url = "{}?moderation_request__collection__id={}".format(
redirect_url,
collection_id
)
if hasattr(version, "contentexpiry"):
content_expiry = version.contentexpiry
for mr in collection.moderation_requests.all():
mr_version = mr.version
if hasattr(mr_version, "contentexpiry"):
mr_content_expiry = mr_version.contentexpiry
mr_content_expiry.expires = content_expiry.expires
mr_content_expiry.save()
else:
ContentExpiry.objects.create(
created_by=request.user,
version=mr_version,
expires=content_expiry.expires,
)
return redirect(redirect_url)
moderation_admin.ModerationRequestTreeAdmin.copy_content_expiry_view = copy_content_expiry_view
|
the-stack_0_23469 | from os import environ as env
import multiprocessing
# Port to bind to
bind = f":{int(env.get('PORT', 5003))}"
# Number of processes to launch
workers = int(env.get('WORKERS', multiprocessing.cpu_count()))
# Number of concurrently handled connections
threads = int(env.get('THREADS', 4))
worker_connections = int(env.get('WORKER_CONNECTIONS', '1000'))
# Recycle each worker process after X requests, randomized by the jitter
max_requests = int(env.get('MAX_REQUESTS', '1000'))
max_requests_jitter = int(env.get('MAX_REQUESTS_JITTER', '100'))
# Connection timeouts
# - Defaults to double the expected poll length for services
graceful_timeout = int(env.get('GRACEFUL_TIMEOUT', '60'))
timeout = int(env.get('TIMEOUT', '60'))
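# Usage sketch (application module path is assumed): point gunicorn at this
# file and override any setting via its environment variable, e.g.
#   WORKERS=8 TIMEOUT=120 gunicorn -c gunicorn_conf.py myapp.wsgi:application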
|
the-stack_0_23474 | import numpy as np
#from code_efficace import *
from Planet import Planet
# Define a vectorized function to take the absolute value of a list
abs_liste = np.vectorize(abs)
def initialize_list(dist_max, nbr_planetes, masse_moyenne, vitesse_moyenne, moment_ang_moyen, masse_terre = 5.9722*(10)**24 , rayon_terre = 6378.137 *(10)**3):
densitee_terre = (masse_terre)/((4*np.pi*rayon_terre**3)/3)
#########################################
    #     Definition of the planet list     #
#########################################
    # 1) Mass:
masse = np.array(abs_liste(np.random.normal(masse_moyenne, masse_moyenne/3, nbr_planetes)))
masse = masse * (masse_moyenne/masse.mean())
    # 2) Radius:
rayon = [(((3*m)/(densitee_terre * 4 * np.pi))**(1/3))/150 for m in masse]
# 3) Position
dist = np.random.rand(nbr_planetes) * dist_max
angle = np.random.rand(nbr_planetes) * 2 * np.pi
x = [ d * np.cos(theta) for d,theta in zip(dist,angle) ]
y = [ d * np.sin(theta) for d,theta in zip(dist,angle) ]
    # 4) Velocity
vitesse = np.random.normal(vitesse_moyenne, vitesse_moyenne/3, nbr_planetes)
vitesse = vitesse * (vitesse_moyenne/vitesse.mean())
angle2 = np.random.rand(nbr_planetes) * 2 * np.pi
    # Define the associated velocity components
vx = [v*np.cos(theta2) for v,theta2 in zip(vitesse,angle2)]
vy = [v*np.sin(theta2) for v,theta2 in zip(vitesse,angle2)]
    # 6) Create the planets
liste_planetes = [Planet(masse, rayon, x, y, vx, vy, '{}'.format(i)) for masse,rayon,x,y,vx,vy,i in zip(masse,rayon,x,y,vx,vy,range(1,len(masse)+1))]
return liste_planetes
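# Minimal usage sketch (illustrative values, requires the local Planet module):
# build 100 roughly Earth-mass planets spread over a 1 AU disc.
if __name__ == '__main__':
    planets = initialize_list(dist_max=1.496e11, nbr_planetes=100,
                              masse_moyenne=5.9722e24, vitesse_moyenne=3.0e4,
                              moment_ang_moyen=0.0)
    print(len(planets))  # -> 100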
|
the-stack_0_23475 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from __future__ import print_function
import argparse
import dace
import numpy as np
import networkx as nx
E = dace.symbol('E')
V = dace.symbol('V')
@dace.program
def shiloach_vishkin(EL, comp):
flag_hook = dace.define_local_scalar(dace.int32)
with dace.tasklet:
out >> flag_hook
out = 1
for v in dace.map[0:V]:
with dace.tasklet:
out >> comp[v]
out = v
while flag_hook:
with dace.tasklet:
out >> flag_hook
out = 0
for e in dace.map[0:2 * E]:
with dace.tasklet:
u << EL[e, 0]
v << EL[e, 1]
parents << comp(3)[:]
out >> comp(1)[:]
f >> flag_hook(-1)
pu = parents[u]
pv = parents[v]
ppv = parents[pv]
if pu < pv and pv == ppv:
out[ppv] = pu
f = 1
# Multi-jump version
for v in dace.map[0:V]:
with dace.tasklet:
inp << comp(-1)[0:v + 1]
out >> comp(-1)[v]
p = inp[v]
pp = inp[p]
while p != pp:
out = pp
p = pp
pp = inp[p]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("edges", type=int, nargs="?", default=17)
parser.add_argument("vertices", type=int, nargs="?", default=16)
parser.add_argument("-seed", type=int, nargs="?", default=None)
args = vars(parser.parse_args())
E.set(args['edges'])
V.set(args['vertices'])
print('Connected Components (Shiloach-Vishkin) E=%d, V=%d' % (E.get(), V.get()))
graph = nx.gnm_random_graph(V.get(), E.get(), seed=args['seed'])
comp = np.arange(0, V.get(), dtype=np.uint64)
EL = dace.ndarray([2 * E, 2], dace.uint64)
EL[:E.get()] = np.array([[u, v] for u, v, d in nx.to_edgelist(graph)], dtype=np.uint64)
EL[E.get():] = np.array([[v, u] for u, v, d in nx.to_edgelist(graph)], dtype=np.uint64)
shiloach_vishkin(EL, comp, E=E, V=V)
cc = nx.number_connected_components(graph)
diff = abs(cc - len(np.unique(comp)))
print("Difference:", diff, '(SV:', len(np.unique(comp)), ', NX:', cc, ')')
print("==== Program end ====")
exit(0 if diff == 0 else 1)
|
the-stack_0_23478 | # the indices of the winning positions.
WINNERS = set()
WINNERS.add((0,1,2))
WINNERS.add((3,4,5))
WINNERS.add((6,7,8))
WINNERS.add((0,3,6))
WINNERS.add((1,4,7))
WINNERS.add((2,5,8))
WINNERS.add((0,4,8))
WINNERS.add((2,4,6))
# "a discount factor... used to balance immediate and future reward."
# the internet says it should be between .8 and 1, but .1 seems to be good.
GAMMA = .1
# the rate to learn. should be a fraction between 0 and 1.
LEARNING_RATE = .01
# number of training epochs
EPOCHS = 1000000
# the percent you want to explore while training
EPSILON = 0.5
# for replaying the game
AFFIRMATIVE = ['y', 'yes', 'yeah', "yea", 'ye']
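# Illustrative sketch (not part of the original constants): WINNERS is meant to
# be checked against a flat 9-cell board, e.g.
#   any(all(board[i] == mark for i in combo) for combo in WINNERS)
# is True once `mark` occupies one of the winning index triples.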
|
the-stack_0_23480 | # -*- coding: utf-8 -*-
"""
jishaku.repl.disassembly
~~~~~~~~~~~~~~~~~~~~~~~~
Functions pertaining to the disassembly of Python code
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import ast
import dis
import import_expression
from jishaku.repl.scope import Scope
CORO_CODE = """
import asyncio
import discord
from discord.ext import commands
from importlib import import_module as {0}
import jishaku
async def _repl_coroutine({{0}}):
pass
""".format(import_expression.constants.IMPORTER)
def wrap_code(code: str, args: str = '') -> ast.Module:
"""
Wraps code for disassembly.
This is similar in function to the jishaku.repl.compilation equivalent,
but due to the different structure required for clean disassemblies,
it's implemented separately here.
"""
user_code = import_expression.parse(code, mode='exec')
mod = import_expression.parse(CORO_CODE.format(args), mode='exec')
definition = mod.body[-1] # async def ...:
assert isinstance(definition, ast.AsyncFunctionDef)
# Patch user code directly into the function
definition.body = user_code.body
ast.fix_missing_locations(mod)
# We do not use the keyword transformer here, since it might produce misleading disassembly.
is_asyncgen = any(isinstance(node, ast.Yield) for node in ast.walk(definition))
last_expr = definition.body[-1]
# if the last part isn't an expression, ignore it
if not isinstance(last_expr, ast.Expr):
return mod
# if this isn't a generator and the last expression is not a return
if not is_asyncgen and not isinstance(last_expr.value, ast.Return):
# copy the value of the expression into a return
return_stmt = ast.Return(last_expr.value)
ast.copy_location(return_stmt, last_expr)
# place the return where the original expression was
definition.body[-1] = return_stmt
return mod
def disassemble(code: str, scope: Scope = None, arg_dict: dict = None):
"""
Disassembles asynchronous code into dis.dis-style bytecode instructions.
"""
# Similar to AsyncCodeExecutor.__init__
arg_names = list(arg_dict.keys()) if arg_dict else []
scope = scope or Scope()
wrapped = wrap_code(code, args=', '.join(arg_names))
exec(compile(wrapped, '<repl>', 'exec'), scope.globals, scope.locals) # pylint: disable=exec-used
func_def = scope.locals.get('_repl_coroutine') or scope.globals['_repl_coroutine']
# pylint is gonna really hate this part onwards
# pylint: disable=protected-access, invalid-name
co = func_def.__code__
for instruction in dis._get_instructions_bytes(
co.co_code, co.co_varnames, co.co_names, co.co_consts,
co.co_cellvars + co.co_freevars, dict(dis.findlinestarts(co)),
line_offset=0
):
if instruction.starts_line is not None and instruction.offset > 0:
yield ''
yield instruction._disassemble(
4, False, 4
)
# pylint: enable=protected-access, invalid-name
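# Minimal usage sketch (assumes the jishaku/discord environment imported above
# is available): disassemble() is a generator of dis-style lines, e.g.
#
#   for line in disassemble("1 + 1"):
#       print(line)
#
# prints the bytecode of the async wrapper that wrap_code() builds around the
# user snippet.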
|
the-stack_0_23481 | import base64
import datetime
import json
import logging
import re
import time
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urljoin
import pytz
import requests
from flask import Response as FlaskResponse
from jsonschema import ValidationError, validate
from moto.apigateway.models import apigateway_backends
from requests.models import Response
from localstack import config
from localstack.constants import (
APPLICATION_JSON,
HEADER_LOCALSTACK_EDGE_URL,
LOCALHOST_HOSTNAME,
PATH_USER_REQUEST,
TEST_AWS_ACCOUNT_ID,
)
from localstack.services.apigateway import helpers
from localstack.services.apigateway.helpers import (
API_REGIONS,
PATH_REGEX_AUTHORIZERS,
PATH_REGEX_CLIENT_CERTS,
PATH_REGEX_DOC_PARTS,
PATH_REGEX_PATH_MAPPINGS,
PATH_REGEX_RESPONSES,
PATH_REGEX_TEST_INVOKE_API,
PATH_REGEX_VALIDATORS,
extract_path_params,
extract_query_string_params,
get_cors_response,
get_resource_for_path,
handle_accounts,
handle_authorizers,
handle_base_path_mappings,
handle_client_certificates,
handle_documentation_parts,
handle_gateway_responses,
handle_validators,
handle_vpc_links,
make_error_response,
)
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import ProxyListener
from localstack.services.kinesis import kinesis_listener
from localstack.services.stepfunctions.stepfunctions_utils import await_sfn_execution_result
from localstack.utils import common
from localstack.utils.analytics import event_publisher
from localstack.utils.aws import aws_responses, aws_stack
from localstack.utils.aws.aws_responses import (
LambdaResponse,
flask_to_requests_response,
parse_query_string,
request_response_stream,
requests_response,
)
from localstack.utils.aws.request_context import MARKER_APIGW_REQUEST_REGION, THREAD_LOCAL
from localstack.utils.common import (
camel_to_snake_case,
json_safe,
long_uid,
to_bytes,
to_str,
try_json,
)
# set up logger
from localstack.utils.http_utils import add_query_params_to_url
LOG = logging.getLogger(__name__)
# target ARN patterns
TARGET_REGEX_S3_URI = (
r"^arn:aws:apigateway:[a-zA-Z0-9\-]+:s3:path/(?P<bucket>[^/]+)/(?P<object>.+)$"
)
# regex path pattern for user requests
PATH_REGEX_USER_REQUEST = (
r"^/restapis/([A-Za-z0-9_\-]+)(?:/([A-Za-z0-9_\-]+))?/%s/(.*)$" % PATH_USER_REQUEST
)
# URL pattern for invocations
HOST_REGEX_EXECUTE_API = (
r"(?:.*://)?([a-zA-Z0-9-]+)\.execute-api\.(%s|([^\.]+)\.amazonaws\.com)(.*)"
% LOCALHOST_HOSTNAME
)
REQUEST_TIME_DATE_FORMAT = "%d/%b/%Y:%H:%M:%S %z"
class ApiGatewayVersion(Enum):
V1 = "v1"
V2 = "v2"
# type definition for data parameters (i.e., invocation payloads)
InvocationPayload = Union[Dict, str, bytes]
class AuthorizationError(Exception):
pass
class ApiInvocationContext:
"""Represents the context for an incoming API Gateway invocation."""
# basic (raw) HTTP invocation details (method, path, data, headers)
method: str
path: str
data: InvocationPayload
headers: Dict[str, str]
# invocation context
context: Dict[str, Any]
# authentication info for this invocation
auth_info: Dict[str, Any]
# target API/resource details extracted from the invocation
apigw_version: ApiGatewayVersion
api_id: str
stage: str
region_name: str
# resource path, including any path parameter placeholders (e.g., "/my/path/{id}")
resource_path: str
integration: Dict
resource: Dict
# Invocation path with query string, e.g., "/my/path?test". Defaults to "path", can be used
# to overwrite the actual API path, in case the path format "../_user_request_/.." is used.
_path_with_query_string: str
# response templates to be applied to the invocation result
response_templates: Dict
route: Dict
connection_id: str
path_params: Dict
# response object
response: Response
def __init__(
self,
method,
path,
data,
headers,
api_id=None,
stage=None,
context=None,
auth_info=None,
):
self.method = method
self.path = path
self.data = data
self.headers = headers
self.context = {} if context is None else context
self.auth_info = {} if auth_info is None else auth_info
self.apigw_version = None
self.api_id = api_id
self.stage = stage
self.region_name = None
self.integration = None
self.resource = None
self.resource_path = None
self.path_with_query_string = None
self.response_templates = {}
@property
def resource_id(self) -> Optional[str]:
return (self.resource or {}).get("id")
@property
def invocation_path(self) -> str:
"""Return the plain invocation path, without query parameters."""
path = self.path_with_query_string or self.path
return path.split("?")[0]
@property
def path_with_query_string(self) -> str:
"""Return invocation path with query string - defaults to the value of 'path', unless customized."""
return self._path_with_query_string or self.path
@path_with_query_string.setter
def path_with_query_string(self, new_path: str):
"""Set a custom invocation path with query string (used to handle "../_user_request_/.." paths)."""
self._path_with_query_string = new_path
def query_params(self) -> Dict:
"""Extract the query parameters from the target URL or path in this request context."""
query_string = self.path_with_query_string.partition("?")[2]
return parse_query_string(query_string)
@property
def integration_uri(self) -> Optional[str]:
integration = self.integration or {}
return integration.get("uri") or integration.get("integrationUri")
@property
def auth_context(self) -> Optional[Dict]:
if isinstance(self.auth_info, dict):
context = self.auth_info.setdefault("context", {})
principal = self.auth_info.get("principalId")
if principal:
context["principalId"] = principal
return context
@property
def auth_identity(self) -> Optional[Dict]:
if isinstance(self.auth_info, dict):
if self.auth_info.get("identity") is None:
self.auth_info["identity"] = {}
return self.auth_info["identity"]
def is_websocket_request(self):
upgrade_header = str(self.headers.get("upgrade") or "")
return upgrade_header.lower() == "websocket"
def is_v1(self):
"""Whether this is an API Gateway v1 request"""
return self.apigw_version == ApiGatewayVersion.V1
class ProxyListenerApiGateway(ProxyListener):
def forward_request(self, method, path, data, headers):
invocation_context = ApiInvocationContext(method, path, data, headers)
forwarded_for = headers.get(HEADER_LOCALSTACK_EDGE_URL, "")
if re.match(PATH_REGEX_USER_REQUEST, path) or "execute-api" in forwarded_for:
result = invoke_rest_api_from_request(invocation_context)
if result is not None:
return result
data = data and json.loads(to_str(data))
if re.match(PATH_REGEX_AUTHORIZERS, path):
return handle_authorizers(method, path, data, headers)
if re.match(PATH_REGEX_DOC_PARTS, path):
return handle_documentation_parts(method, path, data, headers)
if re.match(PATH_REGEX_VALIDATORS, path):
return handle_validators(method, path, data, headers)
if re.match(PATH_REGEX_RESPONSES, path):
return handle_gateway_responses(method, path, data, headers)
if re.match(PATH_REGEX_PATH_MAPPINGS, path):
return handle_base_path_mappings(method, path, data, headers)
if is_test_invoke_method(method, path):
# if call is from test_invoke_api then use http_method to find the integration,
# as test_invoke_api makes a POST call to request the test invocation
match = re.match(PATH_REGEX_TEST_INVOKE_API, path)
invocation_context.method = match[3]
if data:
orig_data = data
path_with_query_string = orig_data.get("pathWithQueryString", None)
if path_with_query_string:
invocation_context.path_with_query_string = path_with_query_string
invocation_context.data = data.get("body")
invocation_context.headers = orig_data.get("headers", {})
result = invoke_rest_api_from_request(invocation_context)
result = {
"status": result.status_code,
"body": to_str(result.content),
"headers": dict(result.headers),
}
return result
return True
def return_response(self, method, path, data, headers, response):
# fix backend issue (missing support for API documentation)
if re.match(r"/restapis/[^/]+/documentation/versions", path):
if response.status_code == 404:
return requests_response({"position": "1", "items": []})
# add missing implementations
if response.status_code == 404:
data = data and json.loads(to_str(data))
result = None
if path == "/account":
result = handle_accounts(method, path, data, headers)
elif path.startswith("/vpclinks"):
result = handle_vpc_links(method, path, data, headers)
elif re.match(PATH_REGEX_CLIENT_CERTS, path):
result = handle_client_certificates(method, path, data, headers)
if result is not None:
response.status_code = 200
aws_responses.set_response_content(response, result, getattr(result, "headers", {}))
# keep track of API regions for faster lookup later on
if method == "POST" and path == "/restapis":
content = json.loads(to_str(response.content))
api_id = content["id"]
region = aws_stack.extract_region_from_auth_header(headers)
API_REGIONS[api_id] = region
# publish event
if method == "POST" and path == "/restapis":
content = json.loads(to_str(response.content))
event_publisher.fire_event(
event_publisher.EVENT_APIGW_CREATE_API,
payload={"a": event_publisher.get_hash(content["id"])},
)
api_regex = r"^/restapis/([a-zA-Z0-9\-]+)$"
if method == "DELETE" and re.match(api_regex, path):
api_id = re.sub(api_regex, r"\1", path)
event_publisher.fire_event(
event_publisher.EVENT_APIGW_DELETE_API,
payload={"a": event_publisher.get_hash(api_id)},
)
class RequestValidator:
__slots__ = ["context", "apigateway_client"]
def __init__(self, context: ApiInvocationContext, apigateway_client):
self.context = context
self.apigateway_client = apigateway_client
def is_request_valid(self) -> bool:
# make all the positive checks first
if self.context.resource is None or "resourceMethods" not in self.context.resource:
return True
resource_methods = self.context.resource["resourceMethods"]
if self.context.method not in resource_methods:
return True
# check if there is validator for the resource
resource = resource_methods[self.context.method]
if not (resource.get("requestValidatorId") or "").strip():
return True
# check if there is a validator for this request
validator = self.apigateway_client.get_request_validator(
restApiId=self.context.api_id, requestValidatorId=resource["requestValidatorId"]
)
if validator is None:
return True
# are we validating the body?
if self.should_validate_body(validator):
is_body_valid = self.validate_body(resource)
if not is_body_valid:
return is_body_valid
if self.should_validate_request(validator):
is_valid_parameters = self.validate_parameters_and_headers(resource)
if not is_valid_parameters:
return is_valid_parameters
return True
def validate_body(self, resource):
# we need a model to validate the body
if "requestModels" not in resource or not resource["requestModels"]:
return False
schema_name = resource["requestModels"].get(APPLICATION_JSON)
model = self.apigateway_client.get_model(
restApiId=self.context.api_id,
modelName=schema_name,
)
if not model:
return False
try:
validate(instance=json.loads(self.context.data), schema=json.loads(model["schema"]))
return True
except ValidationError as e:
LOG.warning("failed to validate request body", e)
return False
# TODO implement parameters and headers
def validate_parameters_and_headers(self, resource):
return True
@staticmethod
def should_validate_body(validator):
return validator["validateRequestBody"]
@staticmethod
def should_validate_request(validator):
return validator.get("validateRequestParameters")
# ------------
# API METHODS
# ------------
def run_authorizer(invocation_context: ApiInvocationContext, authorizer: Dict):
# TODO implement authorizers
pass
def authorize_invocation(invocation_context: ApiInvocationContext):
client = aws_stack.connect_to_service("apigateway")
authorizers = client.get_authorizers(restApiId=invocation_context.api_id, limit=100).get(
"items", []
)
for authorizer in authorizers:
run_authorizer(invocation_context, authorizer)
def validate_api_key(api_key: str, stage: str):
usage_plan_ids = []
client = aws_stack.connect_to_service("apigateway")
usage_plans = client.get_usage_plans()
for item in usage_plans.get("items", []):
api_stages = item.get("apiStages", [])
for api_stage in api_stages:
if api_stage.get("stage") == stage:
usage_plan_ids.append(item.get("id"))
for usage_plan_id in usage_plan_ids:
usage_plan_keys = client.get_usage_plan_keys(usagePlanId=usage_plan_id)
for key in usage_plan_keys.get("items", []):
if key.get("value") == api_key:
return True
return False
def is_api_key_valid(is_api_key_required: bool, headers: Dict[str, str], stage: str):
if not is_api_key_required:
return True
api_key = headers.get("X-API-Key")
if not api_key:
return False
return validate_api_key(api_key, stage)
def update_content_length(response: Response):
if response and response.content is not None:
response.headers["Content-Length"] = str(len(response.content))
def apply_request_parameters(
uri: str, integration: Dict[str, Any], path_params: Dict[str, str], query_params: Dict[str, str]
):
request_parameters = integration.get("requestParameters")
uri = uri or integration.get("uri") or integration.get("integrationUri") or ""
if request_parameters:
for key in path_params:
# check if path_params is present in the integration request parameters
request_param_key = f"integration.request.path.{key}"
request_param_value = f"method.request.path.{key}"
if request_parameters.get(request_param_key, None) == request_param_value:
uri = uri.replace(f"{{{key}}}", path_params[key])
if integration.get("type") != "HTTP_PROXY" and request_parameters:
for key in query_params.copy():
request_query_key = f"integration.request.querystring.{key}"
request_param_val = f"method.request.querystring.{key}"
if request_parameters.get(request_query_key, None) != request_param_val:
query_params.pop(key)
return add_query_params_to_url(uri, query_params)
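# Illustrative sketch (hypothetical values): with
#   integration["requestParameters"] == {"integration.request.path.id": "method.request.path.id"}
# and path_params == {"id": "42"}, a uri template such as "http://backend/{id}"
# becomes "http://backend/42"; for non-HTTP_PROXY integrations, query params
# without a matching "integration.request.querystring.<key>" mapping are dropped
# before the rest are appended by add_query_params_to_url.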
def apply_template(
integration: Dict[str, Any],
req_res_type: str,
data: InvocationPayload,
path_params=None,
query_params=None,
headers=None,
context=None,
):
if path_params is None:
path_params = {}
if query_params is None:
query_params = {}
if headers is None:
headers = {}
if context is None:
context = {}
integration_type = integration.get("type") or integration.get("integrationType")
if integration_type in ["HTTP", "AWS"]:
# apply custom request template
content_type = APPLICATION_JSON # TODO: make configurable!
template = integration.get("%sTemplates" % req_res_type, {}).get(content_type)
if template:
variables = {"context": context or {}}
input_ctx = {"body": data}
# little trick to flatten the input context so velocity templates
# work from the root.
# orig - { "body": '{"action": "$default","message":"foobar"}'
# after - {
# "body": '{"action": "$default","message":"foobar"}',
# "action": "$default",
# "message": "foobar"
# }
if data:
dict_pack = try_json(data)
if isinstance(dict_pack, dict):
for k, v in dict_pack.items():
input_ctx.update({k: v})
def _params(name=None):
# See https://docs.aws.amazon.com/apigateway/latest/developerguide/
# api-gateway-mapping-template-reference.html#input-variable-reference
# Returns "request parameter from the path, query string, or header value (searched in that order)"
combined = {}
combined.update(path_params or {})
combined.update(query_params or {})
combined.update(headers or {})
return combined if not name else combined.get(name)
input_ctx["params"] = _params
data = aws_stack.render_velocity_template(template, input_ctx, variables=variables)
return data
def apply_response_parameters(invocation_context: ApiInvocationContext):
response = invocation_context.response
integration = invocation_context.integration
int_responses = integration.get("integrationResponses") or {}
if not int_responses:
return response
entries = list(int_responses.keys())
return_code = str(response.status_code)
if return_code not in entries:
if len(entries) > 1:
LOG.info("Found multiple integration response status codes: %s", entries)
return response
return_code = entries[0]
response_params = int_responses[return_code].get("responseParameters", {})
for key, value in response_params.items():
# TODO: add support for method.response.body, etc ...
if str(key).lower().startswith("method.response.header."):
header_name = key[len("method.response.header.") :]
response.headers[header_name] = value.strip("'")
return response
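# Illustrative sketch (hypothetical parameter): an integration response entry
# with responseParameters like
#   {"method.response.header.X-Custom": "'static-value'"}
# makes the loop above set the X-Custom response header to static-value
# (surrounding single quotes are stripped).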
def set_api_id_stage_invocation_path(
invocation_context: ApiInvocationContext,
) -> ApiInvocationContext:
# skip if all details are already available
values = (
invocation_context.api_id,
invocation_context.stage,
invocation_context.path_with_query_string,
)
if all(values):
return invocation_context
# skip if this is a websocket request
if invocation_context.is_websocket_request():
return invocation_context
path = invocation_context.path
headers = invocation_context.headers
path_match = re.search(PATH_REGEX_USER_REQUEST, path)
host_header = headers.get(HEADER_LOCALSTACK_EDGE_URL, "") or headers.get("Host") or ""
host_match = re.search(HOST_REGEX_EXECUTE_API, host_header)
test_invoke_match = re.search(PATH_REGEX_TEST_INVOKE_API, path)
if path_match:
api_id = path_match.group(1)
stage = path_match.group(2)
relative_path_w_query_params = "/%s" % path_match.group(3)
elif host_match:
api_id = extract_api_id_from_hostname_in_url(host_header)
stage = path.strip("/").split("/")[0]
relative_path_w_query_params = "/%s" % path.lstrip("/").partition("/")[2]
elif test_invoke_match:
# special case: fetch the resource details for TestInvokeApi invocations
stage = None
region_name = invocation_context.region_name
api_id = test_invoke_match.group(1)
resource_id = test_invoke_match.group(2)
query_string = test_invoke_match.group(4) or ""
apigateway = aws_stack.connect_to_service(
service_name="apigateway", region_name=region_name
)
resource = apigateway.get_resource(restApiId=api_id, resourceId=resource_id)
resource_path = resource.get("path")
relative_path_w_query_params = f"{resource_path}{query_string}"
else:
raise Exception(
f"Unable to extract API Gateway details from request: {path} {dict(headers)}"
)
if api_id:
# set current region in request thread local, to ensure aws_stack.get_region() works properly
if getattr(THREAD_LOCAL, "request_context", None) is not None:
THREAD_LOCAL.request_context.headers[MARKER_APIGW_REQUEST_REGION] = API_REGIONS.get(
api_id, ""
)
# set details in invocation context
invocation_context.api_id = api_id
invocation_context.stage = stage
invocation_context.path_with_query_string = relative_path_w_query_params
return invocation_context
def extract_api_id_from_hostname_in_url(hostname: str) -> str:
"""Extract API ID 'id123' from URLs like https://id123.execute-api.localhost.localstack.cloud:4566"""
match = re.match(HOST_REGEX_EXECUTE_API, hostname)
api_id = match.group(1)
return api_id
def invoke_rest_api_from_request(invocation_context: ApiInvocationContext):
set_api_id_stage_invocation_path(invocation_context)
try:
return invoke_rest_api(invocation_context)
except AuthorizationError as e:
api_id = invocation_context.api_id
return make_error_response("Not authorized to invoke REST API %s: %s" % (api_id, e), 403)
def invoke_rest_api(invocation_context: ApiInvocationContext):
invocation_path = invocation_context.path_with_query_string
raw_path = invocation_context.path or invocation_path
method = invocation_context.method
headers = invocation_context.headers
# run gateway authorizers for this request
authorize_invocation(invocation_context)
extracted_path, resource = get_target_resource_details(invocation_context)
if not resource:
return make_error_response("Unable to find path %s" % invocation_context.path, 404)
# validate request
validator = RequestValidator(invocation_context, aws_stack.connect_to_service("apigateway"))
if not validator.is_request_valid():
return make_error_response("Invalid request body", 400)
api_key_required = resource.get("resourceMethods", {}).get(method, {}).get("apiKeyRequired")
if not is_api_key_valid(api_key_required, headers, invocation_context.stage):
return make_error_response("Access denied - invalid API key", 403)
integrations = resource.get("resourceMethods", {})
integration = integrations.get(method, {})
if not integration:
# HttpMethod: '*'
# ResourcePath: '/*' - produces 'X-AMAZON-APIGATEWAY-ANY-METHOD'
integration = integrations.get("ANY", {}) or integrations.get(
"X-AMAZON-APIGATEWAY-ANY-METHOD", {}
)
integration = integration.get("methodIntegration")
if not integration:
if method == "OPTIONS" and "Origin" in headers:
# default to returning CORS headers if this is an OPTIONS request
return get_cors_response(headers)
return make_error_response(
"Unable to find integration for: %s %s (%s)" % (method, invocation_path, raw_path),
404,
)
res_methods = resource.get("resourceMethods", {})
meth_integration = res_methods.get(method, {}).get("methodIntegration", {})
int_responses = meth_integration.get("integrationResponses", {})
response_templates = int_responses.get("200", {}).get("responseTemplates", {})
# update fields in invocation context, then forward request to next handler
invocation_context.resource = resource
invocation_context.resource_path = extracted_path
invocation_context.response_templates = response_templates
invocation_context.integration = integration
return invoke_rest_api_integration(invocation_context)
def invoke_rest_api_integration(invocation_context: ApiInvocationContext):
try:
response = invoke_rest_api_integration_backend(invocation_context)
invocation_context.response = response
response = apply_response_parameters(invocation_context)
return response
except Exception as e:
msg = f"Error invoking integration for API Gateway ID '{invocation_context.api_id}': {e}"
LOG.exception(msg)
return make_error_response(msg, 400)
# TODO: refactor this to have a class per integration type to make it easy to
# test the encapsulated logic
def invoke_rest_api_integration_backend(invocation_context: ApiInvocationContext):
# define local aliases from invocation context
invocation_path = invocation_context.path_with_query_string
method = invocation_context.method
path = invocation_context.path
data = invocation_context.data
headers = invocation_context.headers
api_id = invocation_context.api_id
stage = invocation_context.stage
resource_path = invocation_context.resource_path
response_templates = invocation_context.response_templates
integration = invocation_context.integration
# extract integration type and path parameters
relative_path, query_string_params = extract_query_string_params(path=invocation_path)
integration_type_orig = integration.get("type") or integration.get("integrationType") or ""
integration_type = integration_type_orig.upper()
uri = integration.get("uri") or integration.get("integrationUri") or ""
try:
path_params = extract_path_params(path=relative_path, extracted_path=resource_path)
except Exception:
path_params = {}
if (uri.startswith("arn:aws:apigateway:") and ":lambda:path" in uri) or uri.startswith(
"arn:aws:lambda"
):
if integration_type in ["AWS", "AWS_PROXY"]:
func_arn = uri
if ":lambda:path" in uri:
func_arn = (
uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0]
)
# apply custom request template
data_str = data
is_base64_encoded = False
try:
data_str = json.dumps(data) if isinstance(data, (dict, list)) else to_str(data)
data_str = apply_template(
integration,
"request",
data_str,
path_params=path_params,
query_params=query_string_params,
headers=headers,
)
except UnicodeDecodeError:
data_str = base64.b64encode(data_str)
is_base64_encoded = True
except Exception as e:
LOG.warning("Unable to convert API Gateway payload to str: %s", (e))
pass
# Sample request context:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
request_context = get_event_request_context(invocation_context)
stage_variables = (
get_stage_variables(api_id, stage)
if not is_test_invoke_method(method, path)
else None
)
# TODO: change this signature to InvocationContext as well!
result = lambda_api.process_apigateway_invocation(
func_arn,
relative_path,
data_str,
stage,
api_id,
headers,
is_base64_encoded=is_base64_encoded,
path_params=path_params,
query_string_params=query_string_params,
method=method,
resource_path=resource_path,
request_context=request_context,
event_context=invocation_context.context,
stage_variables=stage_variables,
)
if isinstance(result, FlaskResponse):
response = flask_to_requests_response(result)
elif isinstance(result, Response):
response = result
else:
response = LambdaResponse()
parsed_result = (
result if isinstance(result, dict) else json.loads(str(result or "{}"))
)
parsed_result = common.json_safe(parsed_result)
parsed_result = {} if parsed_result is None else parsed_result
response.status_code = int(parsed_result.get("statusCode", 200))
parsed_headers = parsed_result.get("headers", {})
if parsed_headers is not None:
response.headers.update(parsed_headers)
try:
result_body = parsed_result.get("body")
if isinstance(result_body, dict):
response._content = json.dumps(result_body)
else:
body_bytes = to_bytes(to_str(result_body or ""))
if parsed_result.get("isBase64Encoded", False):
body_bytes = base64.b64decode(body_bytes)
response._content = body_bytes
except Exception as e:
LOG.warning("Couldn't set Lambda response content: %s", e)
response._content = "{}"
update_content_length(response)
response.multi_value_headers = parsed_result.get("multiValueHeaders") or {}
# apply custom response template
response._content = apply_template(integration, "response", response._content)
response.headers["Content-Length"] = str(len(response.content or ""))
return response
raise Exception(
'API Gateway integration type "%s", action "%s", method "%s" invalid or not yet implemented'
% (integration_type, uri, method)
)
elif integration_type == "AWS":
if "kinesis:action/" in uri:
if uri.endswith("kinesis:action/PutRecord"):
target = kinesis_listener.ACTION_PUT_RECORD
elif uri.endswith("kinesis:action/PutRecords"):
target = kinesis_listener.ACTION_PUT_RECORDS
elif uri.endswith("kinesis:action/ListStreams"):
target = kinesis_listener.ACTION_LIST_STREAMS
else:
LOG.info(
f"Unexpected API Gateway integration URI '{uri}' for integration type {integration_type}",
)
target = ""
try:
data = json.dumps(data) if isinstance(data, (dict, list)) else to_str(data)
payload = apply_template(
integration,
"request",
data,
path_params=path_params,
query_params=query_string_params,
headers=headers,
)
except Exception as e:
LOG.warning("Unable to convert API Gateway payload to str", e)
raise
# forward records to target kinesis stream
headers = aws_stack.mock_aws_request_headers(
service="kinesis", region_name=invocation_context.region_name
)
headers["X-Amz-Target"] = target
result = common.make_http_request(
url=config.service_url("kineses"), data=payload, headers=headers, method="POST"
)
# apply response template
result = apply_request_response_templates(
result, response_templates, content_type=APPLICATION_JSON
)
return result
elif "states:action/" in uri:
action = uri.split("/")[-1]
if APPLICATION_JSON in integration.get("requestTemplates", {}):
payload = apply_request_response_templates(
data,
integration.get("requestTemplates"),
content_type=APPLICATION_JSON,
as_json=True,
)
else:
payload = json.loads(data.decode("utf-8"))
client = aws_stack.connect_to_service("stepfunctions")
if isinstance(payload.get("input"), dict):
payload["input"] = json.dumps(payload["input"])
# Hot fix since step functions local package responses: Unsupported Operation: 'StartSyncExecution'
method_name = (
camel_to_snake_case(action) if action != "StartSyncExecution" else "start_execution"
)
try:
method = getattr(client, method_name)
except AttributeError:
msg = "Invalid step function action: %s" % method_name
LOG.error(msg)
return make_error_response(msg, 400)
result = method(**payload)
            result = json_safe({k: result[k] for k in result if k != "ResponseMetadata"})
response = requests_response(
content=result,
headers=aws_stack.mock_aws_request_headers(),
)
if action == "StartSyncExecution":
# poll for the execution result and return it
result = await_sfn_execution_result(result["executionArn"])
result_status = result.get("status")
if result_status != "SUCCEEDED":
return make_error_response(
"StepFunctions execution %s failed with status '%s'"
% (result["executionArn"], result_status),
500,
)
result = json_safe(result)
response = requests_response(content=result)
# apply response templates
response = apply_request_response_templates(
response, response_templates, content_type=APPLICATION_JSON
)
return response
elif "s3:path/" in uri and method == "GET":
s3 = aws_stack.connect_to_service("s3")
uri_match = re.match(TARGET_REGEX_S3_URI, uri)
if uri_match:
bucket, object_key = uri_match.group("bucket", "object")
LOG.debug("Getting request for bucket %s object %s", bucket, object_key)
try:
object = s3.get_object(Bucket=bucket, Key=object_key)
except s3.exceptions.NoSuchKey:
msg = "Object %s not found" % object_key
LOG.debug(msg)
return make_error_response(msg, 404)
headers = aws_stack.mock_aws_request_headers(service="s3")
if object.get("ContentType"):
headers["Content-Type"] = object["ContentType"]
# stream used so large files do not fill memory
response = request_response_stream(stream=object["Body"], headers=headers)
return response
else:
msg = "Request URI does not match s3 specifications"
LOG.warning(msg)
return make_error_response(msg, 400)
if method == "POST":
if uri.startswith("arn:aws:apigateway:") and ":sqs:path" in uri:
template = integration["requestTemplates"][APPLICATION_JSON]
account_id, queue = uri.split("/")[-2:]
region_name = uri.split(":")[3]
if "GetQueueUrl" in template or "CreateQueue" in template:
new_request = (
f"{aws_stack.render_velocity_template(template, data)}&QueueName={queue}"
)
else:
queue_url = f"{config.get_edge_url()}/{account_id}/{queue}"
new_request = (
f"{aws_stack.render_velocity_template(template, data)}&QueueUrl={queue_url}"
)
headers = aws_stack.mock_aws_request_headers(service="sqs", region_name=region_name)
url = urljoin(config.service_url("sqs"), f"{TEST_AWS_ACCOUNT_ID}/{queue}")
result = common.make_http_request(
url, method="POST", headers=headers, data=new_request
)
return result
raise Exception(
'API Gateway AWS integration action URI "%s", method "%s" not yet implemented'
% (uri, method)
)
elif integration_type == "AWS_PROXY":
if uri.startswith("arn:aws:apigateway:") and ":dynamodb:action" in uri:
# arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
table_name = uri.split(":dynamodb:action")[1].split("&Table=")[1]
action = uri.split(":dynamodb:action")[1].split("&Table=")[0]
if "PutItem" in action and method == "PUT":
response_template = response_templates.get("application/json")
if response_template is None:
msg = "Invalid response template defined in integration response."
LOG.info("%s Existing: %s", msg, response_templates)
return make_error_response(msg, 404)
response_template = json.loads(response_template)
if response_template["TableName"] != table_name:
msg = "Invalid table name specified in integration response template."
return make_error_response(msg, 404)
dynamo_client = aws_stack.connect_to_resource("dynamodb")
table = dynamo_client.Table(table_name)
event_data = {}
data_dict = json.loads(data)
for key, _ in response_template["Item"].items():
event_data[key] = data_dict[key]
table.put_item(Item=event_data)
response = requests_response(event_data)
return response
else:
raise Exception(
'API Gateway action uri "%s", integration type %s not yet implemented'
% (uri, integration_type)
)
elif integration_type in ["HTTP_PROXY", "HTTP"]:
if ":servicediscovery:" in uri:
# check if this is a servicediscovery integration URI
client = aws_stack.connect_to_service("servicediscovery")
service_id = uri.split("/")[-1]
instances = client.list_instances(ServiceId=service_id)["Instances"]
instance = (instances or [None])[0]
if instance and instance.get("Id"):
uri = "http://%s/%s" % (instance["Id"], invocation_path.lstrip("/"))
# apply custom request template
data = apply_template(integration, "request", data)
if isinstance(data, dict):
data = json.dumps(data)
uri = apply_request_parameters(
uri, integration=integration, path_params=path_params, query_params=query_string_params
)
result = requests.request(method=method, url=uri, data=data, headers=headers)
# apply custom response template
result = apply_template(integration, "response", result)
return result
elif integration_type == "MOCK":
# return empty response - details filled in via responseParameters above...
return requests_response({})
if method == "OPTIONS":
# fall back to returning CORS headers if this is an OPTIONS request
return get_cors_response(headers)
raise Exception(
'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
% (integration_type, method, uri)
)
def get_target_resource_details(invocation_context: ApiInvocationContext) -> Tuple[str, Dict]:
"""Look up and return the API GW resource (path pattern + resource dict) for the given invocation context."""
path_map = helpers.get_rest_api_paths(rest_api_id=invocation_context.api_id)
relative_path = invocation_context.invocation_path
try:
extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
invocation_context.resource = resource
return extracted_path, resource
except Exception:
return None, None
def get_target_resource_method(invocation_context: ApiInvocationContext) -> Optional[Dict]:
"""Look up and return the API GW resource method for the given invocation context."""
_, resource = get_target_resource_details(invocation_context)
if not resource:
return None
methods = resource.get("resourceMethods") or {}
method_name = invocation_context.method.upper()
method_details = methods.get(method_name) or methods.get("ANY")
return method_details
def get_stage_variables(api_id: str, stage: str) -> Dict[str, str]:
if not stage:
return {}
region_name = [name for name, region in apigateway_backends.items() if api_id in region.apis][0]
api_gateway_client = aws_stack.connect_to_service("apigateway", region_name=region_name)
try:
response = api_gateway_client.get_stage(restApiId=api_id, stageName=stage)
return response.get("variables")
except Exception:
LOG.info(f"Failed to get stage {stage} for api id {api_id}")
return {}
def get_event_request_context(invocation_context: ApiInvocationContext):
method = invocation_context.method
path = invocation_context.path
headers = invocation_context.headers
integration_uri = invocation_context.integration_uri
resource_path = invocation_context.resource_path
resource_id = invocation_context.resource_id
set_api_id_stage_invocation_path(invocation_context)
relative_path, query_string_params = extract_query_string_params(
path=invocation_context.path_with_query_string
)
api_id = invocation_context.api_id
stage = invocation_context.stage
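    # the last X-Forwarded-For entry is the gateway/proxy itself (appended on forwarding),
    # so the second-to-last entry is treated as the originating client IP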
source_ip = headers.get("X-Forwarded-For", ",").split(",")[-2].strip()
integration_uri = integration_uri or ""
account_id = integration_uri.split(":lambda:path")[-1].split(":function:")[0].split(":")[-1]
account_id = account_id or TEST_AWS_ACCOUNT_ID
domain_name = f"{api_id}.execute-api.{LOCALHOST_HOSTNAME}"
request_context = {
"resourcePath": resource_path or relative_path,
"apiId": api_id,
"domainPrefix": api_id,
"domainName": domain_name,
"accountId": account_id,
"resourceId": resource_id,
"requestId": long_uid(),
"identity": {
"accountId": account_id,
"sourceIp": source_ip,
"userAgent": headers.get("User-Agent"),
},
"httpMethod": method,
"protocol": "HTTP/1.1",
"requestTime": pytz.utc.localize(datetime.datetime.utcnow()).strftime(
REQUEST_TIME_DATE_FORMAT
),
"requestTimeEpoch": int(time.time() * 1000),
}
# set "authorizer" and "identity" event attributes from request context
auth_context = invocation_context.auth_context
if auth_context:
request_context["authorizer"] = auth_context
request_context["identity"].update(invocation_context.auth_identity or {})
if not is_test_invoke_method(method, path):
request_context["path"] = (f"/{stage}" if stage else "") + relative_path
request_context["stage"] = stage
return request_context
def apply_request_response_templates(
data: Union[Response, bytes],
templates: Dict[str, str],
content_type: str = None,
as_json: bool = False,
):
"""Apply the matching request/response template (if it exists) to the payload data and return the result"""
content_type = content_type or APPLICATION_JSON
is_response = isinstance(data, Response)
templates = templates or {}
template = templates.get(content_type)
if not template:
return data
content = (data.content if is_response else data) or ""
result = aws_stack.render_velocity_template(template, content, as_json=as_json)
if is_response:
data._content = result
update_content_length(data)
return data
return result
def is_test_invoke_method(method, path):
return method == "POST" and bool(re.match(PATH_REGEX_TEST_INVOKE_API, path))
# instantiate listener
UPDATE_APIGATEWAY = ProxyListenerApiGateway()
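# A minimal, hedged sketch of how two of the small helpers above behave; it is not part of
# the service wiring. The payload bytes and the sample path are made-up values: with no
# template registered for the content type, apply_request_response_templates() returns the
# payload unchanged, and is_test_invoke_method() rejects anything that is not a POST.
if __name__ == "__main__":
    sample_payload = b'{"id": 1}'
    untouched = apply_request_response_templates(sample_payload, {})
    print(untouched == sample_payload)  # True: no matching template, payload passed through
    print(is_test_invoke_method("GET", "/restapis/abc123/resources/r1/methods/GET"))  # False: not a POST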
|
the-stack_0_23482 | """djangorest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('api.urls')),
path('api-auth/', include('rest_framework.urls'))
]
|
the-stack_0_23483 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SetupTask(Model):
"""Specifies a setup task which can be used to customize the compute nodes of
the cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param command_line: Required. Command Line to start Setup process.
:type command_line: str
:param environment_variables: Collection of environment variables to be
set for setup task.
:type environment_variables:
list[~azure.mgmt.batchai.models.EnvironmentVariable]
:param secrets: Collection of environment variables with secret values to
be set for setup task. Server will never report values of these variables
back.
:type secrets:
list[~azure.mgmt.batchai.models.EnvironmentVariableWithSecretValue]
:param run_elevated: Specifies whether to run the setup task under root
     account. The default value is false. Note: non-elevated tasks are run
     under an account added to the sudoers list and can perform sudo when
     required. Default value: False.
:type run_elevated: bool
:param std_out_err_path_prefix: Required. The prefix of a path where the
Batch AI service will upload the stdout and stderr of the setup task.
:type std_out_err_path_prefix: str
:ivar std_out_err_path_suffix: A path segment appended by Batch AI to
stdOutErrPathPrefix to form a path where stdout and stderr of the setup
task will be uploaded. Batch AI creates the setup task output directories
     under a unique path to avoid conflicts between different clusters. You
     can concatenate stdOutErrPathPrefix and stdOutErrPathSuffix to get the
full path to the output directory.
:vartype std_out_err_path_suffix: str
"""
_validation = {
'command_line': {'required': True},
'std_out_err_path_prefix': {'required': True},
'std_out_err_path_suffix': {'readonly': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'secrets': {'key': 'secrets', 'type': '[EnvironmentVariableWithSecretValue]'},
'run_elevated': {'key': 'runElevated', 'type': 'bool'},
'std_out_err_path_prefix': {'key': 'stdOutErrPathPrefix', 'type': 'str'},
'std_out_err_path_suffix': {'key': 'stdOutErrPathSuffix', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SetupTask, self).__init__(**kwargs)
self.command_line = kwargs.get('command_line', None)
self.environment_variables = kwargs.get('environment_variables', None)
self.secrets = kwargs.get('secrets', None)
self.run_elevated = kwargs.get('run_elevated', False)
self.std_out_err_path_prefix = kwargs.get('std_out_err_path_prefix', None)
self.std_out_err_path_suffix = None
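# A minimal, hedged usage sketch of the keyword-argument constructor above; the command
# line and output path prefix are illustrative assumptions, not values required by the
# Batch AI service.
if __name__ == "__main__":
    task = SetupTask(
        command_line="echo setup-complete",  # assumed command
        run_elevated=False,
        std_out_err_path_prefix="$AZ_BATCHAI_MOUNT_ROOT/logs",  # assumed prefix
    )
    print(task.command_line, task.run_elevated, task.std_out_err_path_suffix)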
|
the-stack_0_23484 | import json, os
from flask import session
from flask_socketio import emit
from . import globs, utility
from .engine.writer import encode
from .. import socketio
"""Seat stuff."""
def is_free(i):
if i == -1:
return True
if i in range(len(globs.seats)):
return not globs.seats[i]
return False
def is_taken(i):
return not is_free(i)
def free_seat(i):
if i in range(len(globs.seats)):
del session["player"]
globs.seats[i] = False
socketio.emit("free_seat", i)
def take_seat(i):
if i in range(len(globs.seats)):
session["player"] = i
globs.seats[i] = True
socketio.emit("take_seat", i)
@socketio.on("disconnect", namespace = "/")
def disconnected():
"""Someone disconnected. If it was a player, free the seat."""
player = session.get("player")
    if player is not None:
free_seat(player)
@socketio.on("joined", namespace = "/")
def joined():
"""
Send data about game format and free seats. If the game is running,
send the history of the game.
"""
format_data = encode(globs.game.form)
seats_data = encode(globs.seats)
emit("welcome", {"format": format_data, "seats": seats_data})
if len(globs.history) > 0:
globs.send_history()
@socketio.on("request_player_change", namespace = "/")
def request_pc(nplayer):
"""Player wants to change seats. Is it free?"""
if is_free(nplayer):
player = session.get("player")
        if player is not None:
free_seat(player)
take_seat(nplayer)
emit("confirmed_player_change", nplayer)
else:
emit("declined_player_change")
@socketio.on("save_cfg", namespace = "/")
def save_cfg(name, cursors, finisher):
"""Save cursor controls for later retrieval."""
if name in globs.configs:
emit("config_name_taken")
else:
globs.configs[name] = {"cursors": cursors, "finisher": finisher}
fname = os.path.join(utility.ancestor(__file__, 2), "configs", name)
with open(fname, "w") as cfg:
json.dump(globs.configs[name], cfg)
emit("save_cfg_success")
@socketio.on("load_cfg", namespace = "/")
def load_cfg(name):
"""Load previously saved cursor controls."""
if name not in globs.configs:
emit("config_not_exist")
else:
emit("cfg", globs.configs[name])
@socketio.on("actions", namespace = "/")
def actions(data):
pid = session.get("player")
if pid not in range(globs.game.form.num_players):
return
for key in data:
if key == "finish":
globs.finish(pid)
continue
if not key.isdigit():
continue
cid = int(key)
globs.action(pid, cid, data[key])
|
the-stack_0_23486 | import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Cosh(Instruccion):
def __init__(self, valor, strGram, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.DOUBLE_PRECISION),linea,columna,strGram)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
resultado = self.valor.ejecutar(tabla,arbol)
if isinstance(resultado, Excepcion):
return resultado
        if self.valor.tipo.tipo not in (
            Tipo_Dato.SMALLINT, Tipo_Dato.INTEGER, Tipo_Dato.BIGINT, Tipo_Dato.DECIMAL,
            Tipo_Dato.NUMERIC, Tipo_Dato.REAL, Tipo_Dato.DOUBLE_PRECISION,
        ):
error = Excepcion('42883',"Semántico","No existe la función cosh("+self.valor.tipo.toString()+")",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
try:
return math.cosh(resultado)
except ValueError as c:
error = Excepcion('22003',"Semántico","La entrada está fuera de rango",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
            return error
|