filename | text
---|---
stringlengths 13-19 | stringlengths 134-1.04M
the-stack_0_4710 | # Author: Yi Jiang, <[email protected]>, Institute of Physics, Chinese Academy of Sciences
# Adapted from the kdotp-symmetry package by: Dominik Gresch <[email protected]> © 2017-2018, ETH Zurich, Institut für Theoretische Physik
"""
Defines functions to construct the basis of the symmetry-constrained Hamiltonian.
"""
import sympy as sp
from sympy.physics.quantum import TensorProduct
import numpy as np
from functools import reduce
import scipy.linalg as la
from ._expr_utils import monomial_basis, expr_to_vector, matrix_to_expr_operator
from ._repr_utils import hermitian_to_vector, hermitian_basis, repr_to_matrix_operator, check_orthogonal, frobenius_product, solve_linear_system_numpy
from ._repr_utils import hermitian_pauli_basis, hermitian_pauli_basis_symbols
from ._linalg import intersection_basis, nullspace_blocked
from ._to_matrix import to_matrix
from ._logging_setup import LOGGER
from ._decompose_kp import decompose_kp
def symmetric_hamiltonian(
symmetry_operations,
kp_variable = 'k',
order = [0],
repr_basis = 'pauli',
msg_num = None,
kvec = None,
):
r"""
Calculates the basis of the symmetric Hamiltonian for a given set of symmetry operations.
:param symmetry_operations: The symmetry operations that the Hamiltonian should respect.
:type symmetry_operations: :py:class:`list` of :py:class:`dict` with keys 'rotation_matrix', 'repr_matrix', 'repr_has_cc'.
:param kp_variable: The variable of the Hamiltonian, can be any one of 'k', 'E', 'B', 'e', 'k E', 'k B', 'E B', 'k E B'.
:type kp_variable: :py:class:`str`
:param order: The list of orders of the monomials. Each number in the list specifies the order of a variable.
:type order: :py:class:`list` of :py:class:`int`
:param repr_basis: The basis for the hermitian matrices, with the same size as the representations.
By default, the :py:func:`.hermitian_pauli_basis` of the appropriate size is used.
:type repr_basis: :py:class:`list` of :py:mod:`sympy` matrices
:param msg_num & kvec: two strings used to denote the magnetic space group and the little-group k,
used to locate linear representations in order to decompose the kp Hamiltonian.
:type msg_num & kvec: :py:class:`str`
:returns: Basis for the symmetric Hamiltonian, as a :py:class:`list` of :py:mod:`sympy` matrix expressions.
A usage sketch is given at the end of this module.
# Modified by YJ: if msg_num and kvec are specified, also return lists of decomposed repr and expr basis; otherwise return empty lists.
"""
# for sympy or numpy matrices
try:
repr_matrix_size = symmetry_operations[0]['repr_matrix'].shape[0]
# for plain lists -- this doesn't work for sympy matrices because
# their 'len' is the total number of elements
except AttributeError:
repr_matrix_size = len(symmetry_operations[0]['repr_matrix'])
repr_basis_type = 'pauli' if repr_basis == 'pauli' else None
if repr_basis == 'auto':
repr_basis = hermitian_basis(repr_matrix_size)
elif repr_basis == 'pauli':
repr_basis = hermitian_pauli_basis(repr_matrix_size)
repr_basis_symbols = hermitian_pauli_basis_symbols(repr_matrix_size)
if repr_basis not in ['auto', 'pauli']:
check_orthogonal(repr_basis)
Base_vec = ''
for t in kp_variable.split():
if t == 'k':
Base_vec += 'kx ky kz '
elif t == 'E':
Base_vec += 'Ex Ey Ez '
elif t == 'B':
Base_vec += 'Bx By Bz '
elif t == 'e':
Base_vec += 'ex ey ez '
Base_vec = sp.symbols(Base_vec)
expr_basis = monomial_basis(order, kp_variable)
expr_dim = len(expr_basis)
repr_dim = len(repr_basis)
repr_basis_norm_squares = [frobenius_product(b, b) for b in repr_basis]
full_dim = expr_dim * repr_dim
full_basis = [
sp.Matrix(x) for x in np.outer(expr_basis, repr_basis).
reshape(full_dim, repr_matrix_size, repr_matrix_size).tolist()
]
invariant_bases = []
expr_mat_collection = []
repr_mat_collection = []
for isym_op, sym_op in enumerate(symmetry_operations):
LOGGER.info('Calculating matrix form of expression.')
expr_mat = to_matrix(
operator=matrix_to_expr_operator(
sym_op['rotation_matrix'], repr_has_cc = sym_op['repr_has_cc'],
K_VEC = Base_vec
),
basis=expr_basis,
to_vector_fct=expr_to_vector,
K_VEC = Base_vec
)
expr_mat_collection.append(expr_mat)
LOGGER.info('Calculating matrix form of representation.')
repr_mat = to_matrix(
operator=repr_to_matrix_operator(
sym_op['repr_matrix'], complex_conjugate = sym_op['repr_has_cc']
),
basis=repr_basis,
to_vector_fct=hermitian_to_vector,
to_vector_kwargs=dict(basis_norm_squares=repr_basis_norm_squares)
)
repr_mat_collection.append(repr_mat)
# outer product
LOGGER.info('Calculating outer product.')
full_mat = TensorProduct(expr_mat, repr_mat)
# get Eig(F \ocross G, 1) basis
mat = full_mat - sp.eye(full_dim)
LOGGER.info('Calculating nullspace.')
nullspace_basis = nullspace_blocked(mat, simplify=sp.nsimplify)
# Modified by YJ: reshape here is necessary. The original np.array(nullspace_basis).tolist() will run into bugs for python>3.8
curr_basis = [ bs.reshape(1, expr_dim*repr_dim) for bs in nullspace_basis ]
if len(curr_basis) != _numeric_nullspace_dim(mat):
raise ValueError(
'Analytic and numeric dimensions of the nullspace of the matrix {mat} do not match'
.format(mat=mat)
)
invariant_bases.append(curr_basis)
LOGGER.info('Calculating basis intersection.')
basis_vectors = intersection_basis(*invariant_bases)
# ===== Added by YJ: decompose the kp model into symmetric basis ===== #
decomposed_repr_vec, decomposed_repr_mat, decomposed_expr, ir_str_list = [], [], [], []
for basis_vector in basis_vectors:
tmp_repr_vec, tmp_repr_mat, tmp_expr, linear_ir_str = decompose_kp(basis_vector, repr_basis, expr_basis, symmetry_operations, Base_vec, msg_num, kvec)
decomposed_repr_vec.append(tmp_repr_vec)
decomposed_repr_mat.append(tmp_repr_mat)
decomposed_expr.append(tmp_expr)
ir_str_list.append(linear_ir_str)
LOGGER.info('Expanding basis vectors.')
basis_vectors_expanded, decomposed_repr_symbols = [], []
for full_vec, repr_vec in zip(basis_vectors, decomposed_repr_vec):
basis_vectors_expanded.append( sum((v * b for v, b in zip(full_vec, full_basis)), sp.zeros(repr_matrix_size)) )
decomposed_repr_symbols.append([ reduce(lambda x, y : x+' + '+y, [str(sp.nsimplify(v)) + '* ' + b if v != 1 else b\
for v, b in zip(tmp, repr_basis_symbols) if v != 0]) for tmp in repr_vec ] \
if repr_basis_type == 'pauli' else [None] * len(repr_vec)) # one entry appended per basis vector either way
_print_result(basis_vectors_expanded, basis_vectors, decomposed_expr, decomposed_repr_mat, decomposed_repr_symbols, ir_str_list)
return basis_vectors_expanded, decomposed_expr, decomposed_repr_mat
def _numeric_nullspace_dim(mat):
"""Numerically computes the nullspace dimension of a matrix."""
mat_numeric = np.array(mat.evalf().tolist(), dtype=complex)
eigenvals = la.eigvals(mat_numeric)
return np.sum(np.isclose(eigenvals, np.zeros_like(eigenvals)))
def _print_result(kpmodels, basis_vecs, expr_basis_vecs, repr_basis_mats, repr_basis_symbols, ir_str_list):
""" Print the result of kp models and decompoed basis"""
if len(kpmodels) == 0:
print('No symmetry-allowed kp models.')
else:
print('Number of independent kp models:', len(kpmodels))
for ith, kp, base_vec, rep, exp, rep_sym, ir in zip(range(len(kpmodels)), kpmodels, basis_vecs, repr_basis_mats, expr_basis_vecs, repr_basis_symbols, ir_str_list):
print('-----------------------------------------------------')
print('%d-th kp model:'%(ith+1))
print(kp)
print('Basis vector:', base_vec)
if exp is None:
print('Failed to decompose kp.')
else:
if ir:
print('\nDecomposed basis using linear IR:', ir)
else:
print('\nDecomposed basis (not symmetric):')
print('Coefficient basis:')
for ie in exp:
print(ie)
print('\nMatrix basis:')
for isym, ib in zip(rep_sym, rep):
print('Symbol:',isym, ' Expression:', ib, '\n')
print('-----------------------------------------------------')
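# --------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# source). It assumes the package's relative imports above resolve and that
# the default msg_num=None / kvec=None is acceptable; the 2x2 identity
# operation below is a hypothetical example, not data taken from the package.
if __name__ == "__main__":
    identity_op = {
        'rotation_matrix': sp.eye(3),   # acts on (kx, ky, kz)
        'repr_matrix': sp.eye(2),       # acts on a two-band basis
        'repr_has_cc': False,           # no complex conjugation
    }
    kp_basis, expr_parts, repr_parts = symmetric_hamiltonian(
        [identity_op], kp_variable='k', order=[1], repr_basis='pauli'
    )
    print('Symmetry-allowed terms at linear order in k:', len(kp_basis))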
|
the-stack_0_4711 | import json
from .models import *
def cookieCart(request):
try:
cart = json.loads(request.COOKIES['cart'])
except:
cart = {}
print('Cart:', cart)
items = []
order = {'cart_items': 0, 'cart_total': 0}
cartItems = order['cart_items']
for i in cart:
try:
cartItems += cart[i]['quantity']
item = Items.objects.get(id=i)
total = (item.price * cart[i]['quantity'])
order['cart_items'] += cart[i]['quantity']
order['cart_total'] += total
item = {
'item':{
'id': item.id,
'name': item.name,
'price': item.price,
'imageURL': item.imageURL,
},
'quantity': cart[i]['quantity'],
'get_total': total
}
items.append(item)
except:
pass
return {'items':items, 'order':order, 'cartItems':cartItems}
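# Note added for illustration: the "cart" cookie parsed above is assumed to be
# a JSON object keyed by item id, each value carrying a 'quantity', e.g.
#   request.COOKIES['cart'] == '{"3": {"quantity": 2}, "7": {"quantity": 1}}'
# This shape is inferred from the loop above, not taken from upstream docs.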
def cartData(request):
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
items = order.orderitem_set.all()
cartItems = order.cart_items
else:
cookieData = cookieCart(request)
items = cookieData['items']
order = cookieData['order']
cartItems = cookieData['cartItems']
return {'items':items, 'order':order, 'cartItems':cartItems}
def guestOrder(request, data):
print('User not logged in.')
print('Cookies:', request.COOKIES)
name = data['form']['name']
email = data['form']['email']
cookieData = cookieCart(request)
items = cookieData['items']
customer, created = Customer.objects.get_or_create(email = email)
customer.name = name
customer.save()
order, created = Order.objects.get_or_create(customer=customer, complete=False)
for i in items:
item = Items.objects.get(id=i['item']['id'])
orderItem = OrderItem.objects.create(
item = item,
order = order,
quantity = i['quantity']
)
return customer, order
|
the-stack_0_4713 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import time
from typing import Optional
import paddle
from yacs.config import CfgNode
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.log import logger
from paddlespeech.cli.utils import MODEL_HOME
from paddlespeech.resource import CommonTaskResource
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.modules.ctc import CTCDecoder
from paddlespeech.s2t.utils.utility import UpdateConfig
from paddlespeech.server.engine.base_engine import BaseEngine
from paddlespeech.server.utils.paddle_predictor import init_predictor
from paddlespeech.server.utils.paddle_predictor import run_model
__all__ = ['ASREngine', 'PaddleASRConnectionHandler']
class ASRServerExecutor(ASRExecutor):
def __init__(self):
super().__init__()
self.task_resource = CommonTaskResource(
task='asr', model_format='static')
def _init_from_path(self,
model_type: str='wenetspeech',
am_model: Optional[os.PathLike]=None,
am_params: Optional[os.PathLike]=None,
lang: str='zh',
sample_rate: int=16000,
cfg_path: Optional[os.PathLike]=None,
decode_method: str='attention_rescoring',
am_predictor_conf: dict=None):
"""
Init model and other resources from a specific path.
"""
self.max_len = 50
sample_rate_str = '16k' if sample_rate == 16000 else '8k'
tag = model_type + '-' + lang + '-' + sample_rate_str
self.max_len = 50
self.task_resource.set_task_model(model_tag=tag)
if cfg_path is None or am_model is None or am_params is None:
self.res_path = self.task_resource.res_dir
self.cfg_path = os.path.join(
self.res_path, self.task_resource.res_dict['cfg_path'])
self.am_model = os.path.join(self.res_path,
self.task_resource.res_dict['model'])
self.am_params = os.path.join(self.res_path,
self.task_resource.res_dict['params'])
logger.info(self.res_path)
logger.info(self.cfg_path)
logger.info(self.am_model)
logger.info(self.am_params)
else:
self.cfg_path = os.path.abspath(cfg_path)
self.am_model = os.path.abspath(am_model)
self.am_params = os.path.abspath(am_params)
self.res_path = os.path.dirname(
os.path.dirname(os.path.abspath(self.cfg_path)))
#Init body.
self.config = CfgNode(new_allowed=True)
self.config.merge_from_file(self.cfg_path)
with UpdateConfig(self.config):
if "deepspeech2" in model_type:
self.vocab = self.config.vocab_filepath
if self.config.spm_model_prefix:
self.config.spm_model_prefix = os.path.join(
self.res_path, self.config.spm_model_prefix)
self.text_feature = TextFeaturizer(
unit_type=self.config.unit_type,
vocab=self.vocab,
spm_model_prefix=self.config.spm_model_prefix)
self.config.decode.lang_model_path = os.path.join(
MODEL_HOME, 'language_model',
self.config.decode.lang_model_path)
lm_url = self.task_resource.res_dict['lm_url']
lm_md5 = self.task_resource.res_dict['lm_md5']
self.download_lm(
lm_url,
os.path.dirname(self.config.decode.lang_model_path), lm_md5)
elif "conformer" in model_type or "transformer" in model_type:
raise Exception("wrong type")
else:
raise Exception("wrong type")
# AM predictor
self.am_predictor_conf = am_predictor_conf
self.am_predictor = init_predictor(
model_file=self.am_model,
params_file=self.am_params,
predictor_conf=self.am_predictor_conf)
# decoder
self.decoder = CTCDecoder(
odim=self.config.output_dim, # <blank> is in vocab
enc_n_units=self.config.rnn_layer_size * 2,
blank_id=self.config.blank_id,
dropout_rate=0.0,
reduction=True, # sum
batch_average=True, # sum / batch_size
grad_norm_type=self.config.get('ctc_grad_norm_type', None))
@paddle.no_grad()
def infer(self, model_type: str):
"""
Model inference and result stored in self.output.
"""
cfg = self.config.decode
audio = self._inputs["audio"]
audio_len = self._inputs["audio_len"]
if "deepspeech2" in model_type:
decode_batch_size = audio.shape[0]
# init once
self.decoder.init_decoder(
decode_batch_size, self.text_feature.vocab_list,
cfg.decoding_method, cfg.lang_model_path, cfg.alpha, cfg.beta,
cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n,
cfg.num_proc_bsearch)
output_data = run_model(self.am_predictor,
[audio.numpy(), audio_len.numpy()])
probs = output_data[0]
eouts_len = output_data[1]
batch_size = probs.shape[0]
self.decoder.reset_decoder(batch_size=batch_size)
self.decoder.next(probs, eouts_len)
trans_best, trans_beam = self.decoder.decode()
# self.model.decoder.del_decoder()
self._outputs["result"] = trans_best[0]
elif "conformer" in model_type or "transformer" in model_type:
raise Exception("invalid model name")
else:
raise Exception("invalid model name")
class ASREngine(BaseEngine):
"""ASR server engine
Args:
metaclass: Defaults to Singleton.
"""
def __init__(self):
super(ASREngine, self).__init__()
def init(self, config: dict) -> bool:
"""init engine resource
Args:
config_file (str): config file
Returns:
bool: init failed or success
"""
self.executor = ASRServerExecutor()
self.config = config
self.engine_type = "inference"
try:
if self.config.am_predictor_conf.device is not None:
self.device = self.config.am_predictor_conf.device
else:
self.device = paddle.get_device()
paddle.set_device(self.device)
except Exception as e:
logger.error(
"Set device failed, please check if device is already used and the parameter 'device' in the yaml file"
)
logger.error(e)
return False
self.executor._init_from_path(
model_type=self.config.model_type,
am_model=self.config.am_model,
am_params=self.config.am_params,
lang=self.config.lang,
sample_rate=self.config.sample_rate,
cfg_path=self.config.cfg_path,
decode_method=self.config.decode_method,
am_predictor_conf=self.config.am_predictor_conf)
logger.info("Initialize ASR server engine successfully.")
return True
class PaddleASRConnectionHandler(ASRServerExecutor):
def __init__(self, asr_engine):
"""The PaddleSpeech ASR Server Connection Handler
This connection process every asr server request
Args:
asr_engine (ASREngine): The ASR engine
"""
super().__init__()
self.input = None
self.output = None
self.asr_engine = asr_engine
self.executor = self.asr_engine.executor
self.config = self.executor.config
self.max_len = self.executor.max_len
self.decoder = self.executor.decoder
self.am_predictor = self.executor.am_predictor
self.text_feature = self.executor.text_feature
def run(self, audio_data):
"""engine run
Args:
audio_data (bytes): base64.b64decode
"""
if self._check(
io.BytesIO(audio_data), self.asr_engine.config.sample_rate,
self.asr_engine.config.force_yes):
logger.info("start running asr engine")
self.preprocess(self.asr_engine.config.model_type,
io.BytesIO(audio_data))
st = time.time()
self.infer(self.asr_engine.config.model_type)
infer_time = time.time() - st
self.output = self.postprocess() # Retrieve result of asr.
logger.info("end inferring asr engine")
else:
logger.info("file check failed!")
self.output = None
logger.info("inference time: {}".format(infer_time))
logger.info("asr engine type: paddle inference")
|
the-stack_0_4714 | import pathlib
import pandas as pd
import panda_scripts as ps
import joint_pca as pca
from argument_validators import alphanumeric
from shutil import rmtree
from math import floor
from numpy.random import randint
from os import remove
import inspect
from __utils import *
# This is a wrapper for all the data-processing scripts below.
#
# Arguments:
# MONTH_DICT A dictionary from the scope above this level, to be filled by this function
# PARAMS_FILE _______
# LOG_FILE Path for the log file
# SM_FILE File with sm data
# COV_FILE File with cov data
# COV_LAYERS List of strings, length must match the number of layers in the COV_FILE
# EVAL_FILE File with eval data
# SHAPE_DIR Folder path where shape .rds files are (to be) stored
# REG_LIST List of regions to cut out of the sm file
# BUFFER km of buffer around each region
# TRAIN_DIR The directory of training files
# MONTH Numeric month to use in the train data
# EVAL_DIR The directory of the evaluation files
# USE_PCA Run PCA dimension reduction on both train and eval files
# Assumes same covariate columns are present in same order
# VALIDATE 1 to save SM values from TEST for residuals
# 2 to save SM values from EVAL for comparison
# STATS_FILE Path to function for computing statistics on test data
# Default empty string, no stats computed
# RAND Random seed; default 0 generates new seed
# SUPER If 1/True, expands test file of ecoregions to one level up;
# Default 0, nothing changed
# MIN_T_POINTS Minimum number of training points required in each region;
# Default -1 doesn't check
#
# Output:
# The output folder depends on which preprocessing steps are taken
# A log file is generated in LOG_DIR/proc-log#.txt,
# where # is the least unused natural number
def curate(MONTH_DICT, PARAMS_FILE, LOG_FILE, SM_FILE, COV_FILE, COV_LAYERS, EVAL_FILE, SHAPE_DIR,
REG_LIST, BUFFER, TRAIN_DIR, MONTH,
EVAL_DIR, USE_PCA, VALIDATE, STATS_FILE="", RAND=0, SUPER=0, MIN_T_POINTS=-1):
MASK_PATH = pathlib.Path("create_shape.R").resolve()
CROP_PATH = pathlib.Path("crop_to_shape.R").resolve()
ADD_COV_PATH = pathlib.Path("add_topos.R").resolve()
DROP_COLS_PATH = pathlib.Path("drop_cols.py").resolve()
# Prepare shape for cropping.
def create_shape(reg_type, reg, SHAPE_DIR=SHAPE_DIR):
if not SHAPE_DIR.is_dir():
SHAPE_DIR.mkdir(parents=True)
SHAPE_FILE = SHAPE_DIR.joinpath(f"{reg}.rds")
if SHAPE_FILE.is_file():
log.write(f"shape for {reg} exists in {SHAPE_DIR}\n")
else:
shape_args = [MASK_PATH, reg_type, reg, SHAPE_FILE]
log.write(f"{shape_args}\n")
#print(shape_args)
bash(shape_args)
log.write(f"Created shape for {reg} in {SHAPE_DIR}")
return SHAPE_FILE
print(f"Curation log file: {LOG_FILE}")
with open(LOG_FILE, "w") as log:
log.write("----------------------------------------\n")
log.write("Begin data processing with the following arguments...\n")
#https://stackoverflow.com/questions/582056/getting-list-of-parameter-names-inside-python-function
frame = inspect.currentframe()
args, _, _, vals = inspect.getargvalues(frame)
for i in args:
log.write(f"{i}={vals[i]}\n")
log.write("----------------------------------------\n")
# Establish random seed:
if (VALIDATE<=1) or (VALIDATE>=2):
seed=0
else:
if RAND:
seed = int(RAND)
else:
seed = randint(2**16)
log.write(f"For randomization, using {seed}.\n")
#suffix = ""
MONTH_DICT[MONTH] = {}
suffix = f"month{MONTH}"
if SUPER:
suffix += "-LvlUp"
if BUFFER:
suffix += f"-{BUFFER}meter"
MONTH_DICT[MONTH]["buffer"] = BUFFER
if USE_PCA:
suffix += "-PCA"
if seed:
suffix += f"-{VALIDATE-1:.2f}_{seed}"
MONTH_DICT[MONTH]["seed"] = seed
########################################
# Create train and eval files
if VALIDATE:
SM_BEFORE = TRAIN_DIR.parent.joinpath("original_sm-"+suffix)
log.write(f"Soil Moisture data from before preprocessing will go in {SM_BEFORE}\n")
if not SM_BEFORE.is_dir():
SM_BEFORE.mkdir(parents=True)
else:
SM_BEFORE = None
if SM_FILE:
log.write("Extracting sm data from the specified source.\n")
for reg_type,reg in REG_LIST:
if not TRAIN_DIR.is_dir():
TRAIN_DIR.mkdir(parents=True)
REG_TR_FILE = TRAIN_DIR.joinpath(f"{reg_type}_{reg}.csv")
if SUPER and (reg_type=="ECOREGION" or reg_type=="CEC"):
reg = ".".join(reg.split(".")[:-1])
SHAPE_FILE = create_shape(reg_type, reg)
# Crop soil moisture file to shape.
crop_args = [CROP_PATH, SM_FILE, SHAPE_FILE, REG_TR_FILE, BUFFER]
#print(crop_args)
log.write(f"{crop_args}\n")
bash(crop_args)
if COV_FILE:
cov_args = [ADD_COV_PATH, REG_TR_FILE, COV_FILE, REG_TR_FILE] + COV_LAYERS
log.write(f"{cov_args}\n")
bash(cov_args)
else:
log.write("No SM_FILE specified, so train folder assumed populated.\n")
if EVAL_FILE:
log.write("Creating eval files from specified source.\n")
for reg_type, reg in REG_LIST:
log.write(f"Creating EVAL file for {reg}.\n")
if not EVAL_DIR.is_dir():
EVAL_DIR.mkdir(parents=True)
REG_EV_FILE = EVAL_DIR.joinpath(f"{reg_type}_{reg}.csv")
SHAPE_FILE = create_shape(reg_type, reg)
# Crop evaluation file to shape.
crop_args = [CROP_PATH, EVAL_FILE, SHAPE_FILE, REG_EV_FILE]
log.write(f"{crop_args}\n")
bash(crop_args)
if VALIDATE==2:
VALID_FILE = SM_BEFORE.joinpath(REG_EV_FILE.name)
log.write(f"cp {REG_EV_FILE} {VALID_FILE}")
bash(["cp", REG_EV_FILE, VALID_FILE])
print(f"{DROP_COLS_PATH} {REG_EV_FILE} {REG_EV_FILE} -k 0,1")
bash([DROP_COLS_PATH, REG_EV_FILE, REG_EV_FILE, "-k", "0,1"])
if COV_FILE:
cov_args = [ADD_COV_PATH, REG_EV_FILE, COV_FILE, REG_EV_FILE] + COV_LAYERS
log.write(f"{cov_args}\n")
bash(cov_args)
elif COV_FILE:
log.write("Extracting covariate data from the specified source.\n")
for reg_type, reg in REG_LIST:
log.write(f"Creating EVAL file for {reg}.\n")
if not EVAL_DIR.is_dir():
EVAL_DIR.mkdir(parents=True)
REG_EV_FILE = EVAL_DIR.joinpath(f"{reg_type}_{reg}.csv")
SHAPE_FILE = create_shape(reg_type, reg)
# Crop covariate file to shape.
crop_args = [CROP_PATH, COV_FILE, SHAPE_FILE, REG_EV_FILE, 0] + COV_LAYERS
#print(crop_args)
log.write(f"{crop_args}\n")
bash(crop_args)
if VALIDATE==2:
VALID_FILE = SM_BEFORE.joinpath(REG_EV_FILE.name)
log.write(f"cp {REG_EV_FILE} {VALID_FILE}")
bash(["cp", REG_EV_FILE, VALID_FILE])
else:
log.write("No EVAL_FILE or COV_FILE specified, so eval folder assumed populated.\n")
########################################
# Compute statistics on train files
if STATS_FILE:
stat_args = [STATS_FILE, TRAIN_DIR]
log.write(f"{stat_args}\n")
bash(stat_args)
########################################
# Process train and eval files
TRAIN_DIR_TEMP = append_to_folder(TRAIN_DIR, "-postproc-"+suffix)
log.write(f"Processed training data to go in {TRAIN_DIR_TEMP}\n")
if TRAIN_DIR_TEMP.is_dir():
rmtree(TRAIN_DIR_TEMP)
TRAIN_DIR_TEMP.mkdir(parents=True)
EVAL_DIR_TEMP = append_to_folder(EVAL_DIR, "-postproc-"+suffix)
log.write(f"Processed evaluation data to go in {EVAL_DIR_TEMP}\n")
if EVAL_DIR_TEMP.is_dir():
rmtree(EVAL_DIR_TEMP)
EVAL_DIR_TEMP.mkdir(parents=True)
for reg_type, reg in REG_LIST:
region = f"{reg_type}_{reg}.csv"
if not os.path.isfile(TRAIN_DIR.joinpath(region)):
continue
tdf = pd.read_csv(TRAIN_DIR.joinpath(region))#, dtype=float)#.astype(object).infer_objects()
#print(f"before: {tdf.columns}")
tdf.rename(columns=alphanumeric, inplace=True)
#print(f"after: {tdf.columns}")
if not os.path.isfile(EVAL_DIR.joinpath(region)):
continue
edf = pd.read_csv(EVAL_DIR.joinpath(region))#, dtype=float)#.astype(object).infer_objects()
log.write(f"imported edf; first 3 rows:\n{edf.head(3)}\n")
#print(f"before: {edf.columns}")
edf.rename(columns=alphanumeric, inplace=True)
ecols = {edf.columns[0]: tdf.columns[0], edf.columns[1]: tdf.columns[1]}
edf.rename(columns=ecols, inplace=True)
#print(f"after: {edf.columns}")
if MONTH:
replacements = ps.monthify(tdf.columns)
tdf = tdf.rename(columns=replacements)
tdf = ps.keep_month(tdf, MONTH)
######################################################
## Dealing with NAs
######################################################
# Show how many non-NA's there are in each column
log.write(f"Number of non-NA values in tdf by column:\n{tdf.count()}\n")
log.write(f"Number of non-NA values in edf by column:\n{edf.count()}\n")
# LSF is mostly NA in this region; replace it with 0, appropriate for a costal pixel
# Dict of cols with specified NA replacement value
bad_cols = {"LSF":0}
tdf.fillna(value=bad_cols, inplace=True)#[["LSF"]] = tdf[["LSF"]].fillna(0)
edf.fillna(value=bad_cols, inplace=True)#[["LSF"]] = edf[["LSF"]].fillna(0)
for col in bad_cols:
log.write(f"NA's in '{col}' replaced with {bad_cols[col]}.\n")
# Show how many non-NA's there are in each column
#log.write(f"Number of non-NA values in tdf by column:\n{tdf.count()}\n")
#log.write(f"Number of non-NA values in edf by column:\n{edf.count()}\n")
tdf = tdf.dropna()#thresh=4).fillna(0)
log.write(f"First 3 rows of tdf:\n{tdf.head(3)}\n")
#log.write(f"Number of non-NA values in tdf by column:\n{tdf.count()}\n")
edf = edf.dropna()#thresh=4).fillna(0)
log.write(f"First 3 rows of edf:\n{edf.head(3)}\n")
#log.write(f"Number of non-NA values in edf by column:\n{edf.count()}\n")
############################################
trows = tdf.shape[0]
if trows:
log.write(f"There are {trows} training points in {region}.\n")
else:
log.write(f"Warning: there are no training points in {region}!\n")
continue
erows = edf.shape[0]
if erows:
log.write(f"There are {erows} evaluation points in {region}.\n")
else:
log.write(f"Warning: there are no evaluation points in {region}!\n")
continue
if floor(VALIDATE)==1:
before = tdf[tdf.columns[:3]]#.dropna()
if VALIDATE>1:
log.write(f"For before.sample, {seed}.\n")
before = before.sample(frac=(VALIDATE - 1), random_state=seed)
tdf.drop(before.index.tolist(), inplace=True)
trows = tdf.shape[0]
if trows:
log.write(f"There are {trows} training points in {region}.\n")
else:
log.write(f"Warning: there are no training points in {region}!\n")
continue
brows = before.shape[0]
if brows:
log.write(f"There are {brows} validation points in {region}.\n")
else:
log.write(f"Warning: there are no validation points in {region}!\n")
continue
before_path = SM_BEFORE.joinpath(region)
before.to_csv(path_or_buf=before_path, index=False, header=False, na_rep="NA")
if BUFFER or SUPER:
log.write("Trimming validation file back down to {region}.\n")
crop_args = [CROP_PATH, before_path, before_path, r]
log.write(f"{crop_args}\n")
bash(crop_args)
if USE_PCA:
params = pca.get_params(tdf)
log.write(f"Performing PCA.\n")
#log.write(f"tdf pre-PCA: {tdf.shape}\n{tdf.head(3)}\n")
#log.write(f"edf pre-PCA: {edf.shape}\n{edf.head(3)}\n")
log.write(f"pre-PCA:\n{params}\n")
if len(params) > min(tdf.shape[0], edf.shape[0]):
log.write(f"Error: region {region} skipped! You have {tdf.shape[0]} rows of training data and {edf.shape[0]} rows of evaluation data, but you need at least {len(params)} of each to perform PCA on your params.\n")
continue
tdf, edf, comps = pca.joint_pca(tdf, edf, params)
log.write(f"post-PCA:\n{tdf.shape}\n{tdf.head(3)}\n{edf.shape}\n{edf.head(3)}\n{comps}\n")
log.write(f"Completed PCA for {region} with these eigenvalues:\n{comps}\n")
trows = tdf.shape[0]
if trows:
log.write(f"There are {trows} training points in {region}.\n")
else:
log.write(f"Warning: there are no training points in {region}!\n")
continue
erows = edf.shape[0]
if erows:
log.write(f"There are {erows} evaluation points in {region}.\n")
else:
log.write(f"Warning: there are no evaluation points in {region}!\n")
continue
tdf.to_csv(path_or_buf=TRAIN_DIR_TEMP.joinpath(region), index=False)
edf.to_csv(path_or_buf=EVAL_DIR_TEMP.joinpath(region), index=False)
TRAIN_DIR = TRAIN_DIR_TEMP
EVAL_DIR = EVAL_DIR_TEMP
# Update region list to only include those regions with at least a minimum number of test points
if (MIN_T_POINTS > -1):
NEW_REG_LIST = []
for reg_type, reg in REG_LIST:
REG_TR_FILE = TRAIN_DIR.joinpath(f"{reg_type}_{reg}.csv")
if REG_TR_FILE.is_file():
with open(REG_TR_FILE ,'r') as regtrfile:
num_lines = sum(1 for line in regtrfile)
if num_lines > MIN_T_POINTS:
NEW_REG_LIST.append((reg_type, reg))
log.write(f"Region {reg} has {num_lines - 1} data points ({MIN_T_POINTS} required). Kept in region list.\n")
else:
log.write(f"Warning! Region {reg} only has {num_lines - 1} data points ({MIN_T_POINTS} required). Removed from region list.\n")
remove(REG_TR_FILE)
else:
log.write(f"Warning! Region {reg} does not have a test file. Removed from region list.\n")
REG_LIST = NEW_REG_LIST
NEW_REG_FILE = LOG_FILE.with_suffix(f".{MONTH}reg")
with open(NEW_REG_FILE, "w") as reg_out:
for reg_type, reg in REG_LIST:
reg_out.write(f"{reg_type},{reg}\n")
###############################################
log.write("Data curation complete!!\n")
return(SM_BEFORE, TRAIN_DIR, EVAL_DIR, REG_LIST, seed, suffix)
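# --------------------------------------------------------------------------
# Hypothetical invocation (added for illustration). Every path and region
# below is a placeholder; the keyword names follow the argument description
# in the comment block above curate().
if __name__ == "__main__":
    base = pathlib.Path("/tmp/sm_project")      # placeholder project root
    month_dict = {}
    curate(
        MONTH_DICT=month_dict,
        PARAMS_FILE=None,
        LOG_FILE=base / "logs" / "proc-log0.txt",
        SM_FILE=base / "soil_moisture.nc",
        COV_FILE=base / "covariates.nc",
        COV_LAYERS=["elevation", "slope", "LSF"],   # must match the raster layers
        EVAL_FILE=base / "eval_grid.nc",
        SHAPE_DIR=base / "shapes",
        REG_LIST=[("ECOREGION", "6.2.14")],
        BUFFER=0,
        TRAIN_DIR=base / "train",
        MONTH=6,
        EVAL_DIR=base / "eval",
        USE_PCA=1,
        VALIDATE=1,
    )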
|
the-stack_0_4715 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
# If descriptor already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
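# Example invocation (added for illustration; assumes this script is saved as
# extract_boxes.py and that the detector SavedModel path exists):
#   python extract_boxes.py \
#     --detector_path /tmp/d2r_frcnn_20190411/ \
#     --detector_thresh 0.8 \
#     --list_images_path list_images.txt \
#     --output_dir test_boxes \
#     --output_viz_dir test_boxes_viz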
|
the-stack_0_4716 | # -*- coding: utf-8 -*-
'''
tests for user state
user absent
user present
user present with custom homedir
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
from random import randint
import grp
# Import Salt Testing libs
import tests.integration as integration
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest, requires_system_grains
# Import salt libs
import salt.utils
if salt.utils.is_darwin():
USER = 'macuser'
GROUP = 'macuser'
GID = randint(400, 500)
NOGROUPGID = randint(400, 500)
else:
USER = 'nobody'
GROUP = 'nobody'
GID = 'nobody'
NOGROUPGID = 'nogroup'
class UserTest(integration.ModuleCase,
integration.SaltReturnAssertsMixIn):
'''
test for user absent
'''
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def setUp(self):
if salt.utils.is_darwin():
# on macOS we need to add the user, because there is
# no creation time for the 'nobody' user.
add_user = self.run_function('user.add', [USER], gid=GID)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_absent(self):
ret = self.run_state('user.absent', name='unpossible')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_if_present(self):
ret = self.run_state('user.present', name=USER)
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_if_present_with_gid(self):
if self.run_function('group.info', [USER]):
ret = self.run_state('user.present', name=USER, gid=GID)
elif self.run_function('group.info', ['nogroup']):
ret = self.run_state('user.present', name=USER, gid=NOGROUPGID)
else:
self.skipTest(
'Neither \'nobody\' nor \'nogroup\' are valid groups'
)
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_not_present(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
And then destroys that user.
Assume that it will break any system you run it on.
'''
ret = self.run_state('user.present', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_when_home_dir_does_not_18843(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
And then destroys that user.
Assume that it will break any system you run it on.
'''
if salt.utils.is_darwin():
HOMEDIR = '/Users/home_of_salt_test'
else:
HOMEDIR = '/home/home_of_salt_test'
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)
self.run_function('file.absent', name=HOMEDIR)
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_nondefault(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
'''
ret = self.run_state('user.present', name='salt_test',
home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
@requires_system_grains
def test_user_present_gid_from_name_default(self, grains=None):
'''
This is a DESTRUCTIVE TEST. It creates a new user on the minion.
This is an integration test. Not all systems will automatically create
a group of the same name as the user, but I don't have access to any.
If you run the test and it fails, please fix the code it's testing to
work on your operating system.
'''
# MacOS users' primary group defaults to staff (20), not the name of
# user
gid_from_name = False if grains['os_family'] == 'MacOS' else True
ret = self.run_state('user.present', name='salt_test',
gid_from_name=gid_from_name, home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
group_name = grp.getgrgid(ret['gid']).gr_name
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
if grains['os_family'] in ('Suse',):
self.assertEqual(group_name, 'users')
elif grains['os_family'] == 'MacOS':
self.assertEqual(group_name, 'staff')
else:
self.assertEqual(group_name, 'salt_test')
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gid_from_name(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
This is a unit test, NOT an integration test. We create a group of the
same name as the user beforehand, so it should all run smoothly.
'''
ret = self.run_state('group.present', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.present', name='salt_test',
gid_from_name=True, home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
group_name = grp.getgrgid(ret['gid']).gr_name
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
self.assertEqual(group_name, 'salt_test')
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('group.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
@skipIf(sys.getfilesystemencoding().startswith('ANSI'), 'A system encoding which supports Unicode characters must be set. Current setting is: {0}. Try setting $LANG=\'en_US.UTF-8\''.format(sys.getfilesystemencoding()))
def test_user_present_unicode(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
It ensures that unicode GECOS data will be properly handled, without
any encoding-related failures.
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=u'Sålt Test', roomnumber=u'①②③',
workphone=u'١٢٣٤', homephone=u'६७८'
)
self.assertSaltTrueReturn(ret)
# Ensure updating a user also works
ret = self.run_state(
'user.present', name='salt_test', fullname=u'Sølt Test', roomnumber=u'①③②',
workphone=u'٣٤١٢', homephone=u'६८७'
)
self.assertSaltTrueReturn(ret)
# ret = self.run_state('user.absent', name='salt_test')
# self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gecos(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
It ensures that numeric GECOS data will be properly coerced to strings,
otherwise the state will fail because the GECOS fields are written as
strings (and show up in the user.info output as such). Thus the
comparison will fail, since '12345' != 12345.
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=12345, roomnumber=123,
workphone=1234567890, homephone=1234567890
)
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gecos_none_fields(self):
'''
This is a DESTRUCTIVE TEST: it creates a new user on the minion.
It ensures that if no GECOS data is supplied, the fields will be coerced
into empty strings as opposed to the string "None".
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=None, roomnumber=None,
workphone=None, homephone=None
)
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
self.assertEqual('', ret['fullname'])
# MacOS does not supply the following GECOS fields
if not salt.utils.is_darwin():
self.assertEqual('', ret['roomnumber'])
self.assertEqual('', ret['workphone'])
self.assertEqual('', ret['homephone'])
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def tearDown(self):
if salt.utils.is_darwin():
check_user = self.run_function('user.list_users')
if USER in check_user:
del_user = self.run_function('user.delete', [USER], remove=True)
|
the-stack_0_4718 | from __future__ import unicode_literals
from collections import defaultdict
import datetime
import json
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import unix_time
from moto.core import ACCOUNT_ID
from .comparisons import get_comparison_func
class DynamoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return obj.to_json()
def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
self.type = list(type_as_dict.keys())[0]
self.value = list(type_as_dict.values())[0]
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return self.type == other.type and self.value == other.value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values)
class Item(BaseModel):
def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
self.hash_key = hash_key
self.hash_key_type = hash_key_type
self.range_key = range_key
self.range_key_type = range_key_type
self.attrs = {}
for key, value in attrs.items():
self.attrs[key] = DynamoType(value)
def __repr__(self):
return "Item: {0}".format(self.to_json())
def to_json(self):
attributes = {}
for attribute_key, attribute in self.attrs.items():
attributes[attribute_key] = attribute.value
return {"Attributes": attributes}
def describe_attrs(self, attributes):
if attributes:
included = {}
for key, value in self.attrs.items():
if key in attributes:
included[key] = value
else:
included = self.attrs
return {"Item": included}
class Table(CloudFormationModel):
def __init__(
self,
name,
hash_key_attr,
hash_key_type,
range_key_attr=None,
range_key_type=None,
read_capacity=None,
write_capacity=None,
):
self.name = name
self.hash_key_attr = hash_key_attr
self.hash_key_type = hash_key_type
self.range_key_attr = range_key_attr
self.range_key_type = range_key_type
self.read_capacity = read_capacity
self.write_capacity = write_capacity
self.created_at = datetime.datetime.utcnow()
self.items = defaultdict(dict)
@property
def has_range_key(self):
return self.range_key_attr is not None
@property
def describe(self):
results = {
"Table": {
"CreationDateTime": unix_time(self.created_at),
"KeySchema": {
"HashKeyElement": {
"AttributeName": self.hash_key_attr,
"AttributeType": self.hash_key_type,
}
},
"ProvisionedThroughput": {
"ReadCapacityUnits": self.read_capacity,
"WriteCapacityUnits": self.write_capacity,
},
"TableName": self.name,
"TableStatus": "ACTIVE",
"ItemCount": len(self),
"TableSizeBytes": 0,
}
}
if self.has_range_key:
results["Table"]["KeySchema"]["RangeKeyElement"] = {
"AttributeName": self.range_key_attr,
"AttributeType": self.range_key_type,
}
return results
@staticmethod
def cloudformation_name_type():
return "TableName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html
return "AWS::DynamoDB::Table"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
key_attr = [
i["AttributeName"]
for i in properties["KeySchema"]
if i["KeyType"] == "HASH"
][0]
key_type = [
i["AttributeType"]
for i in properties["AttributeDefinitions"]
if i["AttributeName"] == key_attr
][0]
spec = {
"name": properties["TableName"],
"hash_key_attr": key_attr,
"hash_key_type": key_type,
}
# TODO: optional properties still missing:
# range_key_attr, range_key_type, read_capacity, write_capacity
return Table(**spec)
def __len__(self):
count = 0
for key, value in self.items.items():
if self.has_range_key:
count += len(value)
else:
count += 1
return count
def __nonzero__(self):
return True
def __bool__(self):
return self.__nonzero__()
def put_item(self, item_attrs):
hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
if self.has_range_key:
range_value = DynamoType(item_attrs.get(self.range_key_attr))
else:
range_value = None
item = Item(
hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs
)
if range_value:
self.items[hash_value][range_value] = item
else:
self.items[hash_value] = item
return item
def get_item(self, hash_key, range_key):
if self.has_range_key and not range_key:
raise ValueError(
"Table has a range key, but no range key was passed into get_item"
)
try:
if range_key:
return self.items[hash_key][range_key]
else:
return self.items[hash_key]
except KeyError:
return None
def query(self, hash_key, range_comparison, range_objs):
results = []
last_page = True # Once pagination is implemented, change this
if self.range_key_attr:
possible_results = self.items[hash_key].values()
else:
possible_results = list(self.all_items())
if range_comparison:
for result in possible_results:
if result.range_key.compare(range_comparison, range_objs):
results.append(result)
else:
# If we're not filtering on range key, return all values
results = possible_results
return results, last_page
def all_items(self):
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
yield item
else:
yield hash_set
def scan(self, filters):
results = []
scanned_count = 0
last_page = True # Once pagination is implemented, change this
for result in self.all_items():
scanned_count += 1
passes_all_conditions = True
for (
attribute_name,
(comparison_operator, comparison_objs),
) in filters.items():
attribute = result.attrs.get(attribute_name)
if attribute:
# Attribute found
if not attribute.compare(comparison_operator, comparison_objs):
passes_all_conditions = False
break
elif comparison_operator == "NULL":
# Comparison is NULL and we don't have the attribute
continue
else:
# No attribute found and comparison is not NULL. This item
# fails
passes_all_conditions = False
break
if passes_all_conditions:
results.append(result)
return results, scanned_count, last_page
def delete_item(self, hash_key, range_key):
try:
if range_key:
return self.items[hash_key].pop(range_key)
else:
return self.items.pop(hash_key)
except KeyError:
return None
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "StreamArn":
region = "us-east-1"
time = "2000-01-01T00:00:00.000"
return "arn:aws:dynamodb:{0}:{1}:table/{2}/stream/{3}".format(
region, ACCOUNT_ID, self.name, time
)
raise UnformattedGetAttTemplateException()
class DynamoDBBackend(BaseBackend):
def __init__(self):
self.tables = OrderedDict()
def create_table(self, name, **params):
table = Table(name, **params)
self.tables[name] = table
return table
def delete_table(self, name):
return self.tables.pop(name, None)
def update_table_throughput(self, name, new_read_units, new_write_units):
table = self.tables[name]
table.read_capacity = new_read_units
table.write_capacity = new_write_units
return table
def put_item(self, table_name, item_attrs):
table = self.tables.get(table_name)
if not table:
return None
return table.put_item(item_attrs)
def get_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.get_item(hash_key, range_key)
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts):
table = self.tables.get(table_name)
if not table:
return None, None
hash_key = DynamoType(hash_key_dict)
range_values = [DynamoType(range_value) for range_value in range_value_dicts]
return table.query(hash_key, range_comparison, range_values)
def scan(self, table_name, filters):
table = self.tables.get(table_name)
if not table:
return None, None, None
scan_filters = {}
for key, (comparison_operator, comparison_values) in filters.items():
dynamo_types = [DynamoType(value) for value in comparison_values]
scan_filters[key] = (comparison_operator, dynamo_types)
return table.scan(scan_filters)
def delete_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.delete_item(hash_key, range_key)
dynamodb_backend = DynamoDBBackend()
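# --------------------------------------------------------------------------
# Small self-contained walk-through (added for illustration; not part of the
# original source). Attribute values use the DynamoDB v1 wire format
# ({"S": ...}, {"N": ...}) that DynamoType expects; names are made up.
if __name__ == "__main__":
    backend = DynamoDBBackend()
    backend.create_table(
        "users",
        hash_key_attr="username",
        hash_key_type="S",
        range_key_attr=None,
        range_key_type=None,
        read_capacity=5,
        write_capacity=5,
    )
    backend.put_item("users", {"username": {"S": "alice"}, "age": {"N": "30"}})
    item = backend.get_item("users", {"S": "alice"}, None)
    print(item.describe_attrs(attributes=None))
    matches, scanned_count, _ = backend.scan("users", {"age": ("GT", [{"N": "25"}])})
    print(len(matches), "of", scanned_count, "items matched the scan filter")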
|
the-stack_0_4721 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_protection_rules
short_description: Manage a ProtectionRules resource in Oracle Cloud Infrastructure
description:
- This module allows the user to update a ProtectionRules resource in Oracle Cloud Infrastructure
version_added: "2.9.0"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
aliases: ["id"]
required: true
protection_rules:
description:
- ""
type: list
elements: dict
required: true
suboptions:
key:
description:
- The unique key of the protection rule.
- This parameter is updatable.
type: str
required: true
action:
description:
- The action to apply to the protection rule. If unspecified, defaults to `OFF`.
- This parameter is updatable.
type: str
choices:
- "OFF"
- "DETECT"
- "BLOCK"
required: true
exclusions:
description:
- The types of requests excluded from the protection rule action. If the requests matches the criteria in the `exclusions`, the protection
rule action will not be executed.
type: list
elements: dict
suboptions:
target:
description:
- The target of the exclusion.
- This parameter is updatable.
type: str
choices:
- "REQUEST_COOKIES"
- "REQUEST_COOKIE_NAMES"
- "ARGS"
- "ARGS_NAMES"
exclusions:
description:
- ""
- This parameter is updatable.
type: list
elements: str
state:
description:
- The state of the ProtectionRules.
- Use I(state=present) to update an existing a ProtectionRules.
type: str
required: false
default: 'present'
choices: ["present"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Update protection_rules
oci_waas_protection_rules:
waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"
protection_rules:
- key: key_example
action: OFF
"""
RETURN = """
protection_rules:
description:
- Details of the ProtectionRules resource acted upon by the current operation
returned: on success
type: complex
contains:
key:
description:
- The unique key of the protection rule.
returned: on success
type: str
sample: key_example
mod_security_rule_ids:
description:
- The list of the ModSecurity rule IDs that apply to this protection rule. For more information about ModSecurity's open source WAF rules, see
L(Mod Security's documentation,https://www.modsecurity.org/CRS/Documentation/index.html).
returned: on success
type: list
sample: []
name:
description:
- The name of the protection rule.
returned: on success
type: str
sample: name_example
description:
description:
- The description of the protection rule.
returned: on success
type: str
sample: description_example
action:
description:
- The action to take when the traffic is detected as malicious. If unspecified, defaults to `OFF`.
returned: on success
type: str
sample: OFF
labels:
description:
- The list of labels for the protection rule.
- "**Note:** Protection rules with a `ResponseBody` label will have no effect unless `isResponseInspected` is true."
returned: on success
type: list
sample: []
exclusions:
description:
- ""
returned: on success
type: complex
contains:
target:
description:
- The target of the exclusion.
returned: on success
type: str
sample: REQUEST_COOKIES
exclusions:
description:
- ""
returned: on success
type: list
sample: []
sample: {
"key": "key_example",
"mod_security_rule_ids": [],
"name": "name_example",
"description": "description_example",
"action": "OFF",
"labels": [],
"exclusions": [{
"target": "REQUEST_COOKIES",
"exclusions": []
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
from oci.waas.models import ProtectionRuleAction
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ProtectionRulesHelperGen(OCIResourceHelperBase):
"""Supported operations: update, get and list"""
def get_module_resource_id_param(self):
return "waas_policy_id"
def get_module_resource_id(self):
return self.module.params.get("waas_policy_id")
def get_get_fn(self):
return self.client.get_protection_rule
def get_resource(self):
return oci_common_utils.get_default_response_from_resource(
oci_common_utils.list_all_resources(
self.client.get_protection_rule,
waas_policy_id=self.module.params.get("waas_policy_id"),
protection_rule_key=self.module.params.get("protection_rule_key"),
)
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"waas_policy_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
return dict()
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_protection_rules, **kwargs
)
def get_update_model_class(self):
return ProtectionRuleAction
def get_update_model(self):
if self.module.params.get("protection_rules"):
return [
oci_common_utils.convert_input_data_to_model_class(
resource, self.get_update_model_class()
)
for resource in self.module.params["protection_rules"]
]
return []
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_protection_rules,
call_fn_args=(),
call_fn_kwargs=dict(
waas_policy_id=self.module.params.get("waas_policy_id"),
protection_rules=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
ProtectionRulesHelperCustom = get_custom_class("ProtectionRulesHelperCustom")
class ResourceHelper(ProtectionRulesHelperCustom, ProtectionRulesHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
waas_policy_id=dict(aliases=["id"], type="str", required=True),
protection_rules=dict(
type="list",
elements="dict",
required=True,
options=dict(
key=dict(type="str", required=True, no_log=True),
action=dict(
type="str", required=True, choices=["OFF", "DETECT", "BLOCK"]
),
exclusions=dict(
type="list",
elements="dict",
options=dict(
target=dict(
type="str",
choices=[
"REQUEST_COOKIES",
"REQUEST_COOKIE_NAMES",
"ARGS",
"ARGS_NAMES",
],
),
exclusions=dict(type="list", elements="str"),
),
),
),
),
state=dict(type="str", default="present", choices=["present"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="protection_rules",
service_client_class=WaasClient,
namespace="waas",
)
result = dict(changed=False)
if resource_helper.is_update():
result = resource_helper.update()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
the-stack_0_4722 | #!/usr/bin/env python3
import re
import sys
import sqlite3
import traceback
import os
__location__ = os.path.realpath(
os.path.join(
os.getcwd(),
os.path.dirname(__file__)
)
)
input_failures = 0
try:
DATABASE_NAME = os.path.join(__location__, 'data.sqlite')
conn = sqlite3.connect(DATABASE_NAME)
i = 0
for line in sys.stdin:
l = line.strip()
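        # Hypothetical example of the single-line format this parser expects
        # (area, ISO timestamp, confirmed, deceased, status, optional extras/URLs):
        #   ZH 2020-03-25T15:00 1211 7 OK # Extras: current_hosp=58,current_icu=18 # URLs: https://example.org/bulletin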
# Groups: 1 2 3 4 5
        match = re.search(r'^(\w+)\s+([\w\-\:]+)\s+([\w\-]+)\s+(\w+|-)\s+OK(.*)$', l)
if not match:
input_failures += 1
print(f'Error: Not matched input line: {l}')
continue
date_part = match.group(2).split('T')
data = {
'date': date_part[0],
'time': '',
'area': match.group(1),
'tested': '',
'confirmed': match.group(3),
'new_hospitalized': '',
'hospitalized': '',
'icu': '',
'vent': '',
'released': '',
'deceased': match.group(4),
'source': '',
}
if len(date_part) == 2:
data['time'] = date_part[1]
if data['confirmed'] == '-':
data['confirmed'] = ''
else:
data['confirmed'] = int(data['confirmed'])
if data['deceased'] == '-':
data['deceased'] = ''
else:
data['deceased'] = int(data['deceased'])
# Parse optional data.
rest = match.group(5)
extras_match = re.search('# Extras: ([^#]+)', rest)
if extras_match:
try:
extras = extras_match.group(1).strip()
extras = extras.split(',')
extras = { kv.split('=', 2)[0]: int(kv.split('=', 2)[1]) for kv in extras }
if 'current_hosp' in extras:
data['hospitalized'] = extras['current_hosp']
if 'current_icu' in extras:
data['icu'] = extras['current_icu']
if 'current_vent' in extras:
data['vent'] = extras['current_vent']
if 'ncumul_released' in extras:
data['released'] = extras['ncumul_released']
except Exception as e:
print(f'Error: Parsing optional data failed, ignoring: {extras_match.group(1)}')
# Parse URLs
url_match = re.search('# URLs: ([^#]+)', rest)
try:
url_source = url_match.group(1).strip().split(', ')[-1]
        except (AttributeError, TypeError, IndexError):  # url_match is None when no "# URLs:" comment is present
url_source = ''
if 'SCRAPER_SOURCE' in os.environ:
data['source'] = os.environ['SCRAPER_SOURCE']
elif url_source:
data['source'] = url_source
c = conn.cursor()
try:
print(data)
c.execute(
'''
INSERT INTO data (
date,
time,
abbreviation_canton_and_fl,
ncumul_tested,
ncumul_conf,
new_hosp,
current_hosp,
current_icu,
current_vent,
ncumul_released,
ncumul_deceased,
source
)
VALUES
(?,?,?,?,?,?,?,?,?,?,?,?)
''',
[
data['date'],
data['time'],
data['area'],
data['tested'],
data['confirmed'],
data['new_hospitalized'],
data['hospitalized'],
data['icu'],
data['vent'],
data['released'],
data['deceased'],
data['source'],
]
)
print("Successfully added new entry.")
except sqlite3.IntegrityError:
if os.environ.get('SCRAPER_OVERWRITE') == 'yes':
c.execute(
'''
UPDATE data
SET
time = ? ,
ncumul_tested = ? ,
ncumul_conf = ? ,
new_hosp = ? ,
current_hosp = ? ,
current_icu = ? ,
current_vent = ? ,
ncumul_released = ? ,
ncumul_deceased = ?,
source = ?
WHERE date = ?
AND abbreviation_canton_and_fl = ?
''',
[
data['time'],
data['tested'],
data['confirmed'],
data['new_hospitalized'],
data['hospitalized'],
data['icu'],
data['vent'],
data['released'],
data['deceased'],
data['source'],
data['date'],
data['area'],
]
)
print("Successfully updated entry.")
else:
print("Error: Data for this date has already been added")
finally:
conn.commit()
except Exception as e:
print("Error: %s" % e, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(1)
finally:
conn.close()
if input_failures:
sys.exit(1)
|
the-stack_0_4723 | import asyncio
import base64
from pathlib import Path
from subprocess import Popen
from tempfile import mkstemp
import concurrent.futures
import urllib.parse
from nbconvert.exporters import Exporter, HTMLExporter
import aiohttp
from ._screenshot import get_chrome_path
async def handler(ws, data, key=None):
    await ws.send_json(data)
    result = None  # fall back to None if the socket closes without ever sending a 'result' message
async for msg in ws:
msg_json = msg.json()
if 'result' in msg_json:
result = msg_json['result'].get(key)
break
return result
async def main(file_name, p):
async with aiohttp.ClientSession() as session:
connected = False
await asyncio.sleep(1)
for _ in range(10):
try:
resp = await session.get('http://localhost:9222/json')
data = await resp.json()
page_url = data[0]['webSocketDebuggerUrl']
connected = True
            except Exception:
await asyncio.sleep(1)
if connected:
break
if not connected:
p.kill()
raise Exception('Could not connect to chrome server')
async with session.ws_connect(page_url, receive_timeout=3, max_msg_size=0) as ws:
# first - navigate to html page
params = {'url': file_name}
data = {'id': 1, 'method': 'Page.navigate', 'params': params}
frameId = await handler(ws, data, 'frameId')
# second - enable page
# await asyncio.sleep(1)
data = {'id': 2, 'method': 'Page.enable'}
await handler(ws, data)
# third - get html
params = {'frameId': frameId, 'url': file_name}
data = {'id': 3, 'method': 'Page.getResourceContent', 'params': params}
await handler(ws, data, 'content')
# fourth - get pdf
await asyncio.sleep(1)
params = {'displayHeaderFooter': False, 'printBackground': True}
data = {'id': 4, 'method': 'Page.printToPDF', 'params': params}
pdf_data = await handler(ws, data, 'data')
pdf_data = base64.b64decode(pdf_data)
return pdf_data
def launch_chrome():
chrome_path = get_chrome_path()
args = [chrome_path,
'--headless',
'--disable-gpu',
'--run-all-compositor-stages-before-draw',
'--remote-debugging-port=9222'
]
p = Popen(args=args)
return p
def get_html_data(nb, resources, **kw):
he = HTMLExporter()
html_data, resources = he.from_notebook_node(nb, resources, **kw)
html_data = html_data.replace('@media print', '@media xxprintxx')
return html_data
def get_pdf_data(file_name, p):
try:
from asyncio import run
except ImportError:
from ._my_asyncio import run
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future = executor.submit(run, main(file_name, p))
return future.result()
class BrowserExporter(Exporter):
def _file_extension_default(self):
return '.pdf'
def from_notebook_node(self, nb, resources=None, **kw):
resources['output_extension'] = '.pdf'
nb_home = resources['metadata']['path']
p = launch_chrome()
html_data = get_html_data(nb, resources, **kw)
        tf_fd, tf_name = mkstemp(dir=nb_home, suffix='.html')
        with open(tf_fd, 'w') as f:  # opening the descriptor directly also closes it, avoiding an fd leak
            f.write(html_data)
tf_path = Path(tf_name)
full_file_name = 'file://' + urllib.parse.quote(tf_name)
pdf_data = get_pdf_data(full_file_name, p)
        tf_path.unlink()  # remove the temporary HTML file
p.kill()
return pdf_data, resources
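# Hedged usage sketch (not part of the original module; the notebook path is an
# assumption used only for illustration): export a notebook node to PDF with the
# exporter defined above.
#
#   import nbformat
#   nb = nbformat.read('example.ipynb', as_version=4)
#   exporter = BrowserExporter()
#   pdf_bytes, _ = exporter.from_notebook_node(nb, resources={'metadata': {'path': '.'}})
#   with open('example.pdf', 'wb') as f:
#       f.write(pdf_bytes)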
|
the-stack_0_4725 | """
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.api_client import ApiClient, Endpoint as _Endpoint
from com.precisely.apis.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from com.precisely.apis.model.geo_location_access_point import GeoLocationAccessPoint
from com.precisely.apis.model.geo_location_ip_addr import GeoLocationIpAddr
class GeolocationServiceApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_location_by_ip_address_endpoint = _Endpoint(
settings={
'response_type': (GeoLocationIpAddr,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/geolocation/v1/location/byipaddress',
'operation_id': 'get_location_by_ip_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'ip_address',
],
'required': [
'ip_address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'ip_address':
(str,),
},
'attribute_map': {
'ip_address': 'ipAddress',
},
'location_map': {
'ip_address': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_location_by_wi_fi_access_point_endpoint = _Endpoint(
settings={
'response_type': (GeoLocationAccessPoint,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/geolocation/v1/location/byaccesspoint',
'operation_id': 'get_location_by_wi_fi_access_point',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'mac',
'ssid',
'rsid',
'speed',
'access_point',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'mac':
(str,),
'ssid':
(str,),
'rsid':
(str,),
'speed':
(str,),
'access_point':
(str,),
},
'attribute_map': {
'mac': 'mac',
'ssid': 'ssid',
'rsid': 'rsid',
'speed': 'speed',
'access_point': 'accessPoint',
},
'location_map': {
'mac': 'query',
'ssid': 'query',
'rsid': 'query',
'speed': 'query',
'access_point': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def get_location_by_ip_address(
self,
ip_address,
**kwargs
):
"""Location By IP Address. # noqa: E501
This service accepts an IP address and returns the location coordinates corresponding to that IP address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_location_by_ip_address(ip_address, async_req=True)
>>> result = thread.get()
Args:
ip_address (str): This is the ip address of network connected device. It must be a standard IPv4 octet and a valid external address.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeoLocationIpAddr
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['ip_address'] = \
ip_address
return self.get_location_by_ip_address_endpoint.call_with_http_info(**kwargs)
def get_location_by_wi_fi_access_point(
self,
**kwargs
):
"""Location by WiFi Access Point. # noqa: E501
This service accepts a WiFi access point MAC address and returns the location coordinates corresponding to that access point. Only mac or accessPoint are mandatory parameters (one of them has to be provided), rest are optional. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_location_by_wi_fi_access_point(async_req=True)
>>> result = thread.get()
Keyword Args:
mac (str): This should be the 48 bit mac address (or BSSID) of wireless access point. Accepted format is Six groups of two hexadecimal digits, separated by hyphens (-) or colons.. [optional]
ssid (str): The service set identifier for wi-fi access point. It should be alphanumeric with maximum 32 characters.. [optional]
rsid (str): This is the received signal strength indicator from particular wi-fi access point. It should be a number from -113 to 0 and the unit of this strength is dBm.. [optional]
speed (str): This is the connection speed for wi-fi. It should be a number from 0 to 6930 and the unit should be Mbps.. [optional]
access_point (str): This is the JSON based list of wifi access points in the vicinity of device to be located. This parameter is helpful in case, multiple wifi points are visible and we want to make sure that the location of device is best calculated considering all the access points location.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeoLocationAccessPoint
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_location_by_wi_fi_access_point_endpoint.call_with_http_info(**kwargs)
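# Hedged usage sketch (not part of the generated client); authentication setup is
# omitted because it depends on how the SDK's ApiClient is configured:
#
#   api = GeolocationServiceApi()                        # builds a default ApiClient
#   response = api.get_location_by_ip_address('8.8.8.8')
#   print(response)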
|
the-stack_0_4726 | #!/usr/bin/env python
"""
For a given motif annotated VCF file (already run through motifs.py) and a bed-like file for loci of interest and some
value for each loci for each sample, find loci that overlap a variant and compare the value of samples with the variant
to those without the variant. Report robut z-scores for each loci overlapped in an output VCF and report the variants
for each loci in a bed-like, loci-centric output file as well.
Usage: activity.py -i <input.vcf> -a <activity.bed> -ov <output.vcf> -ob <output.bed> [OPTIONS]
Args:
-i (str): Path to sorted variant file to process.
-a (str): Path to activity 'bed' file.
-ov (str): Path to VCF output file to be created.
-ob (str): Path to loci output file to be created.
-th (float, optional): Z-score magnitude threshold that must be met for variants/loci to be reported to output.
Default is 0, so all loci a variant overlaps will be reported.
-fan (int, optional): Set number of samples that must meet z-score threshold for a locus to be reported to
bed output file. So this number of samples must have the variant and have the locus's activity be significantly
affected by it. Default is 0, so a locus will be reported if its activity is altered in even one sample above
the robust z-score threshold.
-ib (bool, optional): Should loci that don't contain any variants that significantly affect their activity
be included in the bed output? False by default, set to True if wanted.
-iv (bool, optional): Should variants that don't significantly alter a locus's activity be included in the
vcf output? False by default, set to True if wanted.
"""
from __future__ import print_function # so Ninja IDE will stop complaining & show symbols
import argparse
import time
from statistics import median
from sequence import read_line2sample_list
from utils import Position, timeString
# TODO - Move into a utils.py file and import as appropriate. Add doc_string.
class Variant(object):
"""
Use to process and handle variant records from a VCF more easily. Create from line of VCF file.
"""
def __init__(self, line, all_sample_names):
self.line_list = line.strip().split("\t")
self.pos = Position(self.line_list[0], int(self.line_list[1]),
(int(self.line_list[1]) + len(self.line_list[3])))
self.ref_allele = self.line_list[3]
self.var_allele = self.line_list[4]
self.iden = self.line_list[2]
self.orig_line = line.strip()
self.info_fields = self.line_list[7].split(";")
self.var_samples, self.motif_fields = self.parse_info_fields()
self.ref_samples = [x for x in all_sample_names if x not in self.var_samples]
self.loci = []
if self.var_samples is not None: # Should never evaluate to False.
self.num_var_samps = len(self.var_samples)
else:
self.num_var_samps = 0
def parse_info_fields(self):
"""
Get names of samples containing variant and motif INFO fields from a variant record's INFO fields.
Args:
self (Variant): Variant object.
Returns:
samples (list of str): List of samples in which variant was called.
motif_fields (list of str): List of INFO fields for variant that contain MOTIF related information.
"""
samples = None
motif_fields = []
for field in self.info_fields:
if field != "INDEL": # Take care of INDEL flag.
field_info = field.split("=")
# TODO - This is a hack work around a bug that's messing up the MOTIFN field in tf_expression.py.
# Go back and actually figure out why the MOTIFN field is getting split up sometimes.
try:
name, data = (field_info[0], field_info[1])
except:
name, data = "BROKEN", None
else:
name, data = "INDEL", None
# TODO - Write method that parses header to determine # samples with variant rather than this lazy method.
if name == "set":
samples = data.split("-")
elif name.startswith("MOTIF"):
motif_fields.append(field)
return (samples, motif_fields)
def get_variant_output(self, include_vcf=False):
"""
Create VCF output line for given Variant object.
Args:
include_vcf (bool): True if variants that don't pass the z-score threshold for any Locus should excluded
from output. False if they should be included.
Returns:
output (str): Line for Variant in appropriate VCF format.
or
None: If include_vcf is True and no Locus that Variant overlaps hits the z-score threshold.
"""
info = self.info_fields
info.insert(0, "SAMPSTV=" + ",".join(self.var_samples))
info.insert(0, "SAMPSR=" + ",".join([x.ref_samples[self] for x in self.loci][0]))
info.insert(0, "SAMPSV=" + ",".join([x.var_samples[self] for x in self.loci][0]))
# TODO - Check and make sure next two lines are functioning properly.
info.insert(0, "SAMPSNR=" + ",".join([str(x.num_valid_ref[self]) for x in self.loci]))
info.insert(0, "SAMPSNV=" + ",".join([str(x.num_valid_var[self]) for x in self.loci]))
# Use lists to maintain order in output so that LOCIID, LOCIVZ, SAMPTHN fields can all be matched up.
z_scores = []
pass_thresh = []
loci_idens = []
for item in self.loci:
loci_idens.append(item.iden)
pass_thresh.append(item.num_pass_thresh[self])
tmp = "(" + ",".join([str(round(x, 4)) for x in item.z_scores[self][0]]) + ")"
z_scores.append(tmp)
info.insert(0, "SAMPTHN=" + ",".join([str(x) for x in pass_thresh]))
info.insert(0, "LOCIVZ=" + ",".join(z_scores))
info.insert(0, "LOCIID=" + ",".join(loci_idens))
# Check if any loci have samples that pass the Z-score threshold.
# TODO - Change this so it check that the NUMBER OF SAMPLES reaching the z-score threshold are enough.
if any([x >= 1 for x in pass_thresh]):
self.info_fields = info
self.line_list[7] = ";".join(self.info_fields)
output = "\t".join(self.line_list)
return output
else:
return None
# TODO - Move into a utils.py file and import as appropriate. Make ActLocus a sub-class of Locus along with GeneLocus.
class Locus(object):
"""
Use to process and handle loci records from an activity file more easily.
Args:
pos (Position): Position object holding genomic position of locus.
orig_line (str): String from which the object was originally created.
iden (str): Unique identifier for the locus.
data (list of float): Data values for each sample for the record.
"""
def __init__(self, line):
line_list = line.strip().split("\t")
self.pos = Position(line_list[0], int(line_list[1]), int(line_list[2]))
self.orig_line = line.strip()
self.iden = str(line_list[3])
self.data = [float(x) for x in line_list[4:]]
self.var_samples = {}
self.ref_samples = {}
self.ref_scores = {}
self.var_scores = {}
self.num_valid_ref = {}
self.num_valid_var = {}
self.num_pass_thresh = {}
self.variants = []
self.z_scores = {}
def add_variant(self, variant, var_samples, ref_samples):
"""
Add Variant object variant to list of Variants that overlap the Locus.
"""
self.variants.append(variant)
self.ref_scores[variant] = []
self.var_scores[variant] = []
self.var_samples[variant] = var_samples
self.ref_samples[variant] = ref_samples
self.num_valid_ref[variant] = len(ref_samples)
self.num_valid_var[variant] = len(var_samples)
self.num_pass_thresh[variant] = 0
self.z_scores[variant] = []
def calc_z_score(self, ref_ind, var_ind, variant, thresh=0):
"""
Calculate a robust z-score for the given locus and variant.
This uses the median absolute deviation (MAD):
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
self.num_valid_ref[variant] = len(ref_ind)
self.num_valid_var[variant] = len(var_ind)
for entry in ref_ind:
scores = self.ref_scores[variant]
scores.append(self.data[int(entry)])
self.ref_scores[variant] = scores
for entry in var_ind:
scores = self.var_scores[variant]
scores.append(self.data[int(entry)])
self.var_scores[variant] = scores
# MAD calculation.
all_scores = self.ref_scores[variant] + self.var_scores[variant]
med = median(all_scores)
abs_score = [abs(x - med) for x in all_scores]
mad = median(abs_score) * 1.4826
# 1.4826 is a constant that assumes a normal distribution to use the MAD as a consistent estimator
# of standard deviation.
robust_z_scores = [((x - med) / mad) for x in self.var_scores[variant]]
for item in robust_z_scores:
if abs(item) >= thresh: # Check number of variant samples that passed the threshold.
passed = self.num_pass_thresh[variant]
passed += 1
self.num_pass_thresh[variant] = passed
vals = self.z_scores[variant]
vals.append(robust_z_scores)
self.z_scores[variant] = vals
return
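    # Hedged worked example (values are made up, for illustration only): with activity
    # values [10, 12, 11, 30], the median is 11.5 and the absolute deviations are
    # [1.5, 0.5, 0.5, 18.5], so MAD = 1.0 * 1.4826 = 1.4826. A variant sample with
    # value 30 then gets a robust z-score of (30 - 11.5) / 1.4826, roughly 12.48.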
def get_locus_output(self, include_bed=False, filter_num=0):
"""
Create list of output lines for given Locus object.
Args:
include_bed (bool): True if variants that don't pass the z-score threshold for any Locus should excluded
from output. False if they should be included.
filter_num (int): Number of samples the must meet z-score threshold for Variant to be included in output.
Returns:
output (list of str): List of lines for Locus in appropriate BED-like format.
or
None: If include_bed is True and Locus doesn't contain any Variants that hit z-score threshold for any
sample.
"""
output = []
chrom = self.pos.chrom
start = self.pos.start
end = self.pos.end
iden = self.iden
# Get info for each Variant that is necessary for output.
for item in self.variants:
num_meet_thresh = int(self.num_pass_thresh[item])
if num_meet_thresh < filter_num and include_bed is False:
continue # Just go to next Variant if required number of samples didn't meet z-score threshold.
out_line = [chrom, str(start), str(end), str(iden)]
motifs_out = ";".join(item.motif_fields)
out_line.append(item.pos.chrom + ":" + str(item.pos.start) + "_" + item.ref_allele + ">" + item.var_allele)
val_var_samps = self.var_samples[item]
num_val_var = len(self.var_samples[item])
val_ref_samps = ",".join(self.ref_samples[item])
num_val_ref = len(self.ref_samples[item])
all_var_samps = ",".join(item.var_samples)
num_all_var_samps = len(item.var_samples)
all_ref_samps = ",".join(item.ref_samples)
num_all_ref_samps = len(item.ref_samples)
# Handle z_scores.
scores_out = []
scores = self.z_scores[item]
for x in range(len(val_var_samps)):
samp = val_var_samps[x]
                score = round(scores[0][x], 4)  # z_scores[item] holds one list of per-variant-sample scores
scores_out.append(samp + "=" + str(score))
out_line.append(",".join(scores_out))
out_line.append(str(num_meet_thresh))
out_line.append(",".join(val_var_samps))
out_line.append(str(num_val_var))
out_line.append(val_ref_samps)
out_line.append(str(num_val_ref))
out_line.append(all_var_samps)
out_line.append(str(num_all_var_samps))
out_line.append(all_ref_samps)
out_line.append(str(num_all_ref_samps))
out_line.append(motifs_out)
output.append("\t".join(out_line))
if output: # If no variants are in the output list, just return None.
return output
else:
return None
def get_activity_samples(header_line):
"""
Parse header of activity file to return sample names and column indices.
Args:
header_line (str): Header line from activity file.
Returns:
act_samples (dict): Dictionary of {sample_name (str): sample_data_index (int)}.
sample_data_index is index for data in sample list, not the line as a whole.
e.g.: [samp1, samp2, samp3] & [20, 10, 5] for data values, then {'samp1': 0}.
"""
line_list = header_line.strip().split("\t")
samples = line_list[4:]
act_samples = {}
for item in samples:
samp_idx = samples.index(item)
sample = item.split(".")[0]
act_samples[sample] = samp_idx
return act_samples
def compare_samples(act_samples, vcf_samples):
"""
Compare samples from activity file and vcf file.
Return only samples in both as a list and delete those not found in both from the act_samples dict.
Args:
act_samples (dict): {(act_sample_names (str)): sample_indices (int)}
vcf_samples (list of str): List of samples found in VCF file.
Returns:
common_samps (list of str): List of names of samples found in both the activity file and VCF file.
valid_act_samps (dict): Dict of {sample_names (str): activity file data column index (int)} for samples found
in both the activity file and VCF file.
"""
common_samps = list(set(list(act_samples)) & set(vcf_samples))
valid_act_samples = {}
# Create new dict for activity samples containing only those found in VCF file as well.
for x in common_samps:
valid_act_samples[x] = act_samples[x]
return (common_samps, valid_act_samples)
def parse_activity_file(activity_file):
"""
Parse activity file to get data values for each record along with sample
names and indices.
Args:
activity_file (str): Path to activity file to process.
Returns:
act_samples (dict): Dict of {sample_name: index for activity vals}.
act_data (list of Locus): List of Locus objects.
"""
with open(activity_file) as f:
header = f.readline().strip()
act_samples = get_activity_samples(header) # Get sample names/indices.
act_data = []
for line in f:
record = Locus(line)
act_data.append(record)
return (act_samples, act_data)
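# Hedged illustration of the expected activity file layout (tab-separated, header row
# of sample names after the first four columns; values below are made up):
#
#   chr    start   end     id       SAMP1.counts   SAMP2.counts
#   chr1   10000   10500   locus1   5.2            7.9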
def reduce_activity_names(act_samps, split_string="_"):
"""
Return only unique part of names in the passed list set.
Code assumes either start or end is unique based on unique set size
Args:
act_samps: list of strings for the activity samples
or dictionary with keys being strings for activity samples
split_string: String to split individual act_samps on
assumes single split is relevant
default = "_"
Returns:
act_samps modified to not include the non-unique part of the input strings
returns same type as the input act_samps, list or dict
"""
split_one = []
split_two = []
for sample in act_samps:
# 1. split on split_string
splitList = sample.split(split_string)
# 2. put first split and all remaining splits in 2 arrays
split_one.append(splitList[0])
if len(splitList) == 1:
# because otherwise it adds an empty list item and breaks below
split_two.append("")
else:
split_two.append(split_string.join(splitList[1:]))
# 3. determine the unique set size (just making it a set makes them unique
s1 = set(split_one)
s2 = set(split_two)
if len(s1) > len(s2):
# s2 is the non-unique part; ie s1 is unique
act_samps_temp = list(s1)
else:
# s1 is the non-unique part; ie s2 is unique
act_samps_temp = list(s2)
if type(act_samps) is list:
# do nothing just return
return (act_samps_temp)
elif type(act_samps) is dict:
# must rebuild the dictionary
act_samps_rebuild = {}
ind = -1
for sample in act_samps:
ind = ind + 1
act_samps_rebuild[act_samps_temp[ind]] = act_samps[sample]
return (act_samps_rebuild)
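# Hedged example of reduce_activity_names (sample names are made up): for
# ['GM12878_peaks', 'K562_peaks'] the prefixes {'GM12878', 'K562'} form the larger
# (unique) set, so the function returns ['GM12878', 'K562'] (in arbitrary order).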
def main(vcf_file, act_file, out_vcf, out_bed, thresh=0, filter_num=0, include_bed=False, include_vcf=False,
drop_act_=1):
"""
Compare activity of loci for samples harboring a variant within a given locus to those samples that do not.
For a given motif annotated VCF file (already run through motifs.py) and a bed-like file for loci of interest and
some value for each loci for each sample, find loci that overlap a variant and compare the value of samples with
the variant to those without the variant. Report z-scores for each loci overlapped in an output VCF and report the
variants for each loci in a bed-like, loci-centric output file as well.
Args:
vcf_file (str): Path to sorted variant file to process.
act_file (str): Path to activity 'bed' file.
out_vcf (str): Path to VCF output file to be created.
out_bed (str): Path to loci output file to be created.
thresh (float, optional): Z-score magnitude that must be met for variants/loci to be reported to output.
filter_num (int, optional): Set number of samples that must meet z-score threshold for locus to be reported to
bed output file. So this number of samples must have the variant and be significantly affected by it.
include_bed (bool, optional): True if loci should be reported in the bed output even if they don't have a
variant in them that significantly affects their activity.
include_vcf (bool, optional): True if variants should be reported in the VCF output even if they don't lie in
a Locus and significantly affect its activity.
drop_act_ (integer, optional): If > 0 then break activity items on _,
return only unique part of name.
code assumes either start or end is unique based on unique set size
once dropped reruns comparison to the vcf samples
if 1: only runs if prior vcf comparison results in no overlap
if 2: runs no matter what
"""
print("Parsing activity data file: " + timeString() + ".")
act_samps, act_data = parse_activity_file(act_file)
output_vcf = open(out_vcf, "w")
output_bed = open(out_bed, "w")
loci_out = [] # Use to hold all Locus objects that overlap a Variant.
with open(vcf_file) as f:
# Add new INFO lines.
line = f.readline().strip()
now = time.strftime("%c")
info_needed = True
# TODO - Refactor this so output isn't such an enormous mess. One info field, multiple sub-fields per motif.
# TODO - Add sample names for those that pass threshold.
info = '##INFO=<ID=LOCIID,Number=.,Type=String,Description="IDs for loci that variant overlaps.">\n'
info += '##INFO=<ID=SAMPSTV,Number=.,Type=String,Description="All samples with the variant allele.">\n'
info += ('##INFO=<ID=SAMPSR,Number=.,Type=String,Description="Samples with the reference allele and loci data'
'.">\n')
info += ('##INFO=<ID=SAMPSV,Number=.,Type=String,Description="Samples with the variant allele and loci data.'
'">\n')
info += ('##INFO=<ID=LOCIVZ,Number=.,Type=String,Description="Robust z-score for each loci '
'containing the variant. Calculated for each sample containing the variant for each loci.">\n')
info += ('##INFO=<ID=SAMPTHN,Number=.,Type=Integer,Description="Number of samples in which the variant meets'
'the z-score magnitude threshold.">\n')
info += ('##INFO=<ID=SAMPSNV,Number=1,Type=Integer,Description="Number of samples containing variant'
' and having loci data.">\n')
info += ('##INFO=<ID=SAMPSNR,Number=1,Type=Integer,Description="Number of samples containing reference'
' and having loci data.">')
command = ('##venusaur=<ID=activity,Date="' + now + '",CommandLineOptions="--input ' + vcf_file +
' --activity ' + act_file + ' --outputvcf ' + out_vcf + ' --outputbed ' + out_bed +
' --threshold ' + str(thresh) + ' --filter_act_num ' + str(filter_num) + ' --include_bed ' +
str(include_bed) + ' --include_vcf ' + str(include_vcf) + '">')
# Print new info lines at the top of the ##INFO section.
while line.startswith("##"):
if info_needed and line.startswith("##INFO"):
print(command, file=output_vcf)
print(command, file=output_bed)
print(info, file=output_vcf)
info_needed = False
print(line, file=output_vcf)
line = f.readline().strip()
vcf_samples = read_line2sample_list(line) # Parse VCF sample header line to get samples present in file.
print(line, file=output_vcf)
print("Comparing samples in VCF file and activity file to find commonalities.\n")
print("VCF samples: ", *vcf_samples, end="\n\n")
print("Activity samples: ", *list(act_samps.keys()), end="\n\n")
common_samps, valid_act_samps = compare_samples(act_samps, vcf_samples) # Get common samples b/twn the two.
print("Common samples: ", *common_samps, end="\n\n")
if drop_act_ > 0:
if drop_act_ == 1 and len(common_samps) == 0:
redo_compare = True
act_samps = reduce_activity_names(act_samps)
elif drop_act_ == 2:
redo_compare = True
# merge old and new samps to match when compare_samples is run below
# if they were just lists the following would work but they are not
# act_samps = list(set(reduce_activity_names(act_samps)) | set(list(act_samps)))
extend_dict = reduce_activity_names(act_samps)
for extdictkey in extend_dict:
act_samps[extdictkey] = extend_dict[extdictkey]
else:
redo_compare = False
if redo_compare:
# Get common samples b/twn the two input sets: vcf and activity.
common_samps, valid_act_samps = compare_samples(act_samps, vcf_samples)
print("Updated Common samples: ", *common_samps, end="\n\n")
print("Processing variants. This may take some time.")
# TODO - Progress bar might actually be a decent addition.
for line in f:
current_var = Variant(line, vcf_samples)
loci_ovlp_var = []
# Check if any of the variant samples actually have activity data as well, skip if not.
for x in current_var.var_samples:
if x in common_samps:
for item in act_data:
if current_var.pos.chrom != item.pos.chrom:
continue
elif current_var.pos.overlaps(item.pos):
loci_ovlp_var.append(item)
break
# If variant overlaps no loci, print to output only if include_vcf option used.
if not loci_ovlp_var:
if include_vcf:
print(line.strip(), file=output_vcf)
continue
else:
continue
# Get activity data indices for both samples with variant and without.
var_act_indices = [valid_act_samps[x] for x in current_var.var_samples if x in valid_act_samps]
ref_act_indices = [valid_act_samps[x] for x in valid_act_samps if x not in current_var.var_samples]
# Calculate z-scores.
for x, loc in enumerate(loci_ovlp_var):
var_samples = [x for x in current_var.var_samples if x in valid_act_samps]
ref_samples = [x for x in valid_act_samps if x not in current_var.var_samples]
loc.add_variant(current_var, var_samples, ref_samples) # Add Variant to Locus object.
loc.calc_z_score(ref_act_indices, var_act_indices, current_var, thresh)
current_var.loci.append(loc) # Add Locus object to given Variant.
loci_ovlp_var[x] = loc
if loc not in loci_out:
loci_out.append(loc) # These will be used for eventual BED output.
vcf_out_line = current_var.get_variant_output(include_vcf)
if vcf_out_line is not None:
print(vcf_out_line, file=output_vcf)
elif include_vcf:
print(line.strip(), file=output_vcf)
print("Filtering loci and creating BED output.")
print("CHR", "START", "END", "ID", "VARIANT", "Z_SCORES", "NUM_PASS_THRESH", "COMMON_VAR_SAMPS",
"NUM_COMMON_VAR_SAMPS", "COMMON_REF_SAMPS", "NUM_COMMON_REF_SAMPS", "ALL_VAR_SAMPS", "NUM_ALL_VAR_SAMPS",
"ALL_REF_SAMPS", "NUM_COMMON_REF_SAMPS"
"MOTIF_INFO", sep="\t", file=output_bed)
for item in loci_out:
bed_out_line = item.get_locus_output(include_bed, filter_num)
if bed_out_line is not None:
print(*bed_out_line, sep="\n", file=output_bed)
print("Complete: " + timeString() + ".")
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument("-i", "--input", dest="input_file", required=True)
parser.add_argument("-a", "--activity", dest="activity_file", required=True)
parser.add_argument("-ov", "--outputvcf", dest="output_vcf", required=True)
parser.add_argument("-ob", "--outputbed", dest="output_bed", required=True)
parser.add_argument("-th", "--threshold", dest="threshold", required=False, default=0)
parser.add_argument("-fan", "--filter_act_num", dest="filter_a_n", required=False, default=0)
parser.add_argument("-ib", "--include_bed", action="store_true", required=False)
parser.add_argument("-iv", "--include_vcf", action="store_true", required=False)
args = parser.parse_args()
inp_file = args.input_file
act_file = args.activity_file
vcf_out = args.output_vcf
bed_out = args.output_bed
th = float(args.threshold)
filter_bed_num = int(args.filter_a_n)
include_bed = args.include_bed
include_vcf = args.include_vcf
main(inp_file, act_file, vcf_out, bed_out, th, filter_bed_num, include_bed, include_vcf)
|
the-stack_0_4727 | #!/usr/bin/env python
"""
Fetches github linguist repository, process its information
and store it in database
"""
import os
import rethinkdb as r
import schedule
import shutil
import subprocess
import sys
import time
import yaml
DEVNULL = open(os.devnull, 'wb')
LANGUAGES_REPO = "https://github.com/github/linguist.git"
LANGUAGES_PATH = "./lib/linguist/languages.yml"
REPO_DIR = "/tmp/linguist"
DB_HOST = os.getenv('DB', 'localhost')
DB = r.connect(DB_HOST, 28015)
def main():
"""
    Runs the job immediately and then re-runs it every hour.
"""
try:
run()
schedule.every().hour.do(run)
while True:
schedule.run_pending()
time.sleep(1)
except:
clean()
def run():
prepare()
dates = langs_dates()
metadata = languages_metadata()
languages = []
for l in metadata:
        entry = {}  # avoid shadowing the built-in 'object'
        entry['name'] = l
        entry['timestamp'] = dates[l]
        if metadata[l].get('type', None):
            entry['type'] = metadata[l]['type']
        if metadata[l].get('group', None):
            entry['group'] = metadata[l]['group']
        languages.append(entry)
sorted_languages = sorted(languages,
key = lambda lang: lang["timestamp"],
reverse=True)
store(sorted_languages)
clean()
def prepare():
"""
Clone the linguist repo and change the working directory to it.
    It also deletes the linguist directory if it was already present
"""
clean()
subprocess.call(["git", "clone", LANGUAGES_REPO, REPO_DIR],
stdout=DEVNULL, stderr=DEVNULL)
os.chdir(REPO_DIR)
def clean():
"""
    Change the working directory out of the linguist directory and remove it if present
"""
if os.path.exists(REPO_DIR):
os.chdir("/")
shutil.rmtree(REPO_DIR)
def langs_dates():
"""
Returns the list of languages available in the language file
with the date in which it was added
"""
language_history = set()
result = {}
for i, commit in enumerate(commits()):
actual = languages_in_commit(commit)
if i == 0:
timestamp = commit_time(commit)
for language in actual:
result[language] = timestamp
language_history = set(actual)
else:
old = language_history
language_history = language_history.union(set(actual))
diff = language_history - old
if diff:
timestamp = commit_time(commit)
for language in diff:
result[language] = timestamp
    filtered = filter_deleted(result)
    return filtered  # drop languages that are no longer present on master
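# Hedged example of the langs_dates() return value (Unix timestamps are made up):
#   {'Python': 1231648000, 'Rust': 1277000000, ...}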
def languages_metadata():
yaml = read_langs_file()
metadata_keys = ('type', 'group')
result = {}
for languages in yaml:
result[languages] = {k: yaml[languages][k] for k in yaml[languages] if k in metadata_keys}
return result
def commits():
"""
Returns the list of commits in ascending order that changed
the languages file without counting the commit merges
"""
commits_b = subprocess.check_output(["git", "log", "--no-merges", "--pretty=%H", LANGUAGES_PATH], stderr=DEVNULL)
commits_reverse = commits_b.decode().strip().split('\n')
return commits_reverse[::-1]
def languages_lang_file():
"""
Returns the list of languages present in the language file
with their respective type and group
"""
yaml = read_langs_file()
return list(yaml.keys())
def read_langs_file():
"""
Reads the language file
"""
with open(LANGUAGES_PATH) as langs_file:
try:
            languages_yaml = yaml.safe_load(langs_file)
return languages_yaml
except:
return {}
def languages_in_commit(commit):
"""
Returns the list of languages
present in the language file for a specific commit
"""
subprocess.call(["git", "checkout", commit, LANGUAGES_PATH],
stdout=DEVNULL, stderr=DEVNULL)
return languages_lang_file()
def commit_time(commit):
"""
Returns the commit time in epoc format of a specific commit
"""
output_b = subprocess.check_output(["git", "show", "-s", "--format=%ct",
commit])
output = output_b.decode().strip()
return int(output)
def filter_deleted(languages):
"""
Returns a hash with the languages that are in the languages argument
minus the ones that are no longer present in the last commit
"""
subprocess.call(["git", "reset", "--hard", "master"],
stdout=DEVNULL, stderr=DEVNULL)
last_languages = languages_lang_file()
filtered_languages = {}
for lang in languages:
if lang in last_languages:
filtered_languages[lang] = languages[lang]
return filtered_languages
def store(languages):
"""
Stores in database the result.
If the result is equal to the latest row in the db
it only updates the timestamp
"""
table = r.db('indielangs').table("languages")
latest, latest_id = latest_result()
if latest == languages:
table.get(latest_id).update({'timestamp': r.now()}).run(DB)
else:
row = {'languages': languages, 'timestamp': r.now()}
table.insert(row).run(DB)
def latest_result():
"""
Returns the latest row with the list of languages
available in the database and the id of the row
"""
table = r.db('indielangs').table("languages")
latest = table.order_by(r.desc('timestamp')).limit(1).run(DB)
if latest:
return latest[0]['languages'], latest[0]['id']
else:
return {}, None
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_4728 | from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
from .fields import (
AddField, AlterField, FieldOperation, RemoveField, RenameField,
)
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def reduce(self, operation, in_between, app_label=None):
return (
super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_model(self.name, app_label)
)
class CreateModel(ModelOperation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options', 'managers']
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
super(CreateModel, self).__init__(name)
# Sanity-check that there are no duplicated field names, bases, or
# manager names
_check_for_duplicates('fields', (name for name, _ in self.fields))
_check_for_duplicates('bases', (
base._meta.label_lower if hasattr(base, '_meta') else
base.lower() if isinstance(base, six.string_types) else base
for base in self.bases
))
_check_for_duplicates('managers', (name for name, _ in self.managers))
def deconstruct(self):
kwargs = {
'name': self.name,
'fields': self.fields,
}
if self.options:
kwargs['options'] = self.options
if self.bases and self.bases != (models.Model,):
kwargs['bases'] = self.bases
if self.managers and self.managers != [('objects', models.Manager())]:
kwargs['managers'] = self.managers
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.add_model(ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
name_lower = name.lower()
if name_lower == self.name_lower:
return True
# Check we didn't inherit from the model
models_to_check = [base for base in self.bases if base is not models.Model]
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.remote_field:
models_to_check.append(field.remote_field.model)
# Now go over all the models and check against them
for model in models_to_check:
model_app_label, model_name = self.model_to_key(model)
if model_name.lower() == name_lower:
if app_label is None or not model_app_label or model_app_label == app_label:
return True
return False
def model_to_key(self, model):
"""
Take either a model class or an "app_label.ModelName" string
and return (app_label, object_name).
"""
if isinstance(model, six.string_types):
return model.split(".", 1)
else:
return model._meta.app_label, model._meta.object_name
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, DeleteModel) and
self.name_lower == operation.name_lower and
not self.options.get("proxy", False)):
return []
elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
return [
CreateModel(
operation.new_name,
fields=self.fields,
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
if isinstance(operation, AddField):
# Don't allow optimizations of FKs through models they reference
if hasattr(operation.field, "remote_field") and operation.field.remote_field:
for between in in_between:
# Check that it doesn't point to the model
app_label, object_name = self.model_to_key(operation.field.remote_field.model)
if between.references_model(object_name, app_label):
return False
# Check that it's not through the model
if getattr(operation.field.remote_field, "through", None):
app_label, object_name = self.model_to_key(operation.field.remote_field.through)
if between.references_model(object_name, app_label):
return False
return [
CreateModel(
self.name,
fields=self.fields + [(operation.name, operation.field)],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, AlterField):
return [
CreateModel(
self.name,
fields=[
(n, operation.field if n == operation.name else v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RemoveField):
return [
CreateModel(
self.name,
fields=[
(n, v)
for n, v in self.fields
if n.lower() != operation.name_lower
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RenameField):
return [
CreateModel(
self.name,
fields=[
(operation.new_name if n == operation.old_name else n, v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
return super(CreateModel, self).reduce(operation, in_between, app_label=app_label)
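# Hedged illustration (not part of Django itself): a generated migration file would
# typically instantiate these operations roughly like the sketch below; the model
# and field names are made up.
#
#   from django.db import migrations, models
#
#   class Migration(migrations.Migration):
#       operations = [
#           migrations.CreateModel(
#               name='Author',
#               fields=[
#                   ('id', models.AutoField(primary_key=True)),
#                   ('name', models.CharField(max_length=100)),
#               ],
#           ),
#           migrations.RenameModel(old_name='Author', new_name='Writer'),
#       ]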
class DeleteModel(ModelOperation):
"""
Drops a model's table.
"""
def deconstruct(self):
kwargs = {
'name': self.name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(ModelOperation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super(RenameModel, self).__init__(old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
'old_name': self.old_name,
'new_name': self.new_name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
apps = state.apps
model = apps.get_model(app_label, self.old_name)
model._meta.apps = apps
# Get all of the related objects we need to repoint
all_related_objects = (
f for f in model._meta.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
)
# Rename the model
state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
state.models[app_label, self.new_name_lower].name = self.new_name
state.remove_model(app_label, self.old_name_lower)
# Repoint the FKs and M2Ms pointing to us
for related_object in all_related_objects:
if related_object.model is not model:
# The model being renamed does not participate in this relation
# directly. Rather, a superclass does.
continue
# Use the new related key for self referential related objects.
if related_object.related_model == model:
related_key = (app_label, self.new_name_lower)
else:
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.remote_field.model = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
# Repoint M2Ms with through pointing to us
related_models = {
f.remote_field.model for f in model._meta.fields
if getattr(f.remote_field, 'model', None)
}
model_name = '%s.%s' % (app_label, self.old_name)
for related_model in related_models:
if related_model == model:
related_key = (app_label, self.new_name_lower)
else:
related_key = (related_model._meta.app_label, related_model._meta.model_name)
new_fields = []
changed = False
for name, field in state.models[related_key].fields:
if field.is_relation and field.many_to_many and field.remote_field.through == model_name:
field = field.clone()
field.remote_field.through = '%s.%s' % (app_label, self.new_name)
changed = True
new_fields.append((name, field))
if changed:
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
state.reload_model(app_label, self.new_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
for related_object in old_model._meta.related_objects:
if related_object.related_model == old_model:
model = new_model
related_key = (app_label, self.new_name_lower)
else:
model = related_object.related_model
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
to_field = to_state.apps.get_model(
*related_key
)._meta.get_field(related_object.field.name)
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
# Rename M2M fields whose name is based on this model's name.
fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
for (old_field, new_field) in fields:
# Skip self-referential fields as these are renamed above.
if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
continue
# Rename the M2M table that's based on this model's name.
old_m2m_model = old_field.remote_field.through
new_m2m_model = new_field.remote_field.through
schema_editor.alter_db_table(
new_m2m_model,
old_m2m_model._meta.db_table,
new_m2m_model._meta.db_table,
)
# Rename the column in the M2M table that's based on this
# model's name.
schema_editor.alter_field(
new_m2m_model,
old_m2m_model._meta.get_field(old_model._meta.model_name),
new_m2m_model._meta.get_field(new_model._meta.model_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name_lower or
name.lower() == self.new_name_lower
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, RenameModel) and
self.new_name_lower == operation.old_name_lower):
return [
RenameModel(
self.old_name,
operation.new_name,
),
]
# Skip `ModelOperation.reduce` as we want to run `references_model`
# against self.new_name.
return (
super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_model(self.new_name, app_label)
)
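# Illustrative note on RenameModel.reduce above (not part of Django itself): it lets the
# squasher collapse consecutive renames, e.g. RenameModel('Author', 'Writer') followed by
# RenameModel('Writer', 'Columnist') becomes a single RenameModel('Author', 'Columnist');
# the model names here are hypothetical.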
class AlterModelTable(ModelOperation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.table = table
super(AlterModelTable, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'table': self.table,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.models[app_label, self.name_lower].options["db_table"] = self.table
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
def reduce(self, operation, in_between, app_label=None):
if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower:
return [operation]
return super(AlterModelTable, self).reduce(operation, in_between, app_label=app_label)
class ModelOptionOperation(ModelOperation):
def reduce(self, operation, in_between, app_label=None):
if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower:
return [operation]
return super(ModelOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class FieldRelatedOptionOperation(ModelOptionOperation):
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, FieldOperation) and
self.name_lower == operation.model_name_lower and
not self.references_field(operation.model_name, operation.name)):
return [operation, self]
return super(FieldRelatedOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class AlterUniqueTogether(FieldRelatedOptionOperation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
unique_together = normalize_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
super(AlterUniqueTogether, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'unique_together': self.unique_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.unique_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.unique_together or
any((name in together) for together in self.unique_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(FieldRelatedOptionOperation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
index_together = normalize_together(index_together)
self.index_together = set(tuple(cons) for cons in index_together)
super(AlterIndexTogether, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'index_together': self.index_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.index_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.index_together or
any((name in together) for together in self.index_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
"""
Represents a change with the order_with_respect_to option.
"""
def __init__(self, name, order_with_respect_to):
self.order_with_respect_to = order_with_respect_to
super(AlterOrderWithRespectTo, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'order_with_respect_to': self.order_with_respect_to,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
self.order_with_respect_to is None or
name == self.order_with_respect_to
)
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(ModelOptionOperation):
"""
Sets new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.options = options
super(AlterModelOptions, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'options': self.options,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
for key in self.ALTER_OPTION_KEYS:
if key not in self.options and key in model_state.options:
del model_state.options[key]
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(ModelOptionOperation):
"""
Alters the model's managers
"""
serialization_expand_args = ['managers']
def __init__(self, name, managers):
self.managers = managers
super(AlterModelManagers, self).__init__(name)
def deconstruct(self):
return (
self.__class__.__name__,
[self.name, self.managers],
{}
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.managers = list(self.managers)
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change managers on %s" % (self.name, )
|
the-stack_0_4729 | from typing import Tuple
import os
import torch as th
import numpy as np
import gym
import copy
from torch.utils.tensorboard import SummaryWriter
from auto_rl.learning.policy import MLPActorCritic
from auto_rl.learning.buffer import OnPolicyBuffer
from auto_rl.learning.rewards import single_worker_gae, mc_reward_estimation
from auto_rl.utils.torch import change_optim_lr, grad_clip
from auto_rl.utils.gym import infer_action_size, infer_action_type
from auto_rl.simulation.run import single_worker_rollout, rollout_rew, eval_with_render
from auto_rl.utils.tensorboard import log_actor_critic_graph
from auto_rl.utils.logger import Logger
class PPO:
def __init__(self,
env: gym.Env,
policy: MLPActorCritic,
device: str,
log_dir=None):
assert policy.device == device
self.env = env
self.policy = policy
self.device = device
# general logger
self.logger = Logger()
# Tensorboard writer
self.enable_tensorboard = False
if log_dir is not None:
self.enable_tensorboard = True
if self.enable_tensorboard:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
self.tb_writer = SummaryWriter(log_dir)
# log computational graph
log_actor_critic_graph(self.tb_writer, self.env, self.policy, self.device)
# Initialize optimizer
self.optimizer = th.optim.Adam(params=self.policy.parameters(), lr=1e-4)
# Old policy
self.policy_old = copy.deepcopy(self.policy)
self.policy_old.eval()
self.mse_loss = th.nn.MSELoss()
self.buffer = OnPolicyBuffer()
# Action type and size
self.action_type = infer_action_type(self.env)
self.action_size = infer_action_size(self.env, self.action_type)
def predict(self, obs: np.ndarray, deterministic=False):
action = self.policy_old.predict(obs, deterministic)
return action
def rollout(self, rollout_steps):
single_worker_rollout(self.env, self.policy, self.buffer, rollout_steps)
# Log
rew_mean, rew_min, rew_max = rollout_rew(self.buffer)
self.logger.add("rew. avg.", rew_mean)
self.logger.add("rew. min", rew_min)
self.logger.add("rew. max", rew_max)
if self.enable_tensorboard:
self.tb_writer.add_scalar("reward/mean", rew_mean)
self.tb_writer.add_scalar("reward/min", rew_min)
self.tb_writer.add_scalar("reward/max", rew_max)
def update(self, lr, optimize_epoch_num, batch_size,
gamma, gae_lam, ratio_clip_cnst,
entropy_coef, value_coef, grad_clip_cnst):
change_optim_lr(self.optimizer, lr)
loss, value, entropy = None, None, None
for _ in range(optimize_epoch_num):
loss, value, entropy = self.compute_loss(batch_size, gamma, gae_lam,
ratio_clip_cnst, entropy_coef, value_coef)
self.optimizer.zero_grad()
loss.backward()
if grad_clip_cnst is not None:
grad_clip(self.policy, grad_clip_cnst)
self.optimizer.step()
# Log
if loss is not None:
self.logger.add("loss", loss.detach().cpu().numpy())
self.logger.add("value", value.detach().cpu().numpy())
self.logger.add("entropy", entropy.detach().cpu().numpy())
if self.enable_tensorboard:
self.tb_writer.add_scalar("loss/loss", loss)
self.tb_writer.add_scalar("loss/value", value)
self.tb_writer.add_scalar("loss/entropy", entropy)
# Copy new weights into old policy
self.policy_old.load_state_dict(self.policy.state_dict())
assert not self.policy_old.training
self.buffer.clear()
def learn(self, total_steps,
rollout_steps,
lr,
optimize_epoch_num,
batch_size,
gamma,
gae_lam,
ratio_clip_cnst,
entropy_coef,
value_coef,
grad_clip_cnst=None,
eval_intv=None):
for i in range(total_steps // rollout_steps + 1):
self.rollout(rollout_steps)
self.update(lr, optimize_epoch_num, batch_size,
gamma, gae_lam, ratio_clip_cnst,
entropy_coef, value_coef,
grad_clip_cnst)
# Log output
self.logger.dump()
# evaluate with video
if eval_intv is not None and i % eval_intv == 0:
eval_with_render(self.env, self.policy)
def compute_loss(self, batch_size, gamma, gae_lam,
ratio_clip_cnst,
entropy_coef, value_coef, use_gae=False) \
-> Tuple[th.Tensor, th.Tensor, th.Tensor]:
if batch_size is None:
# read all data, no batch
s1, actions, rewards, dones, s2 = self.buffer.read()
else:
s1, actions, rewards, dones, s2 = self.buffer.sample(batch_size)
assert not use_gae, "Inefficient to compute GAE from random sample."
s1 = th.from_numpy(s1).float().to(self.device)
actions = th.from_numpy(actions).float().to(self.device)
_, old_log_probs, _ = self.policy_old.eval_policy(s1, actions)
assert self.policy.training
values, log_probs, entropy = self.policy.eval_policy(s1, actions)
advantages, value_estimation = self.compute_advantage(gae_lam, dones, rewards, values, gamma)
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
value_estimation = (value_estimation - value_estimation.mean()) / (value_estimation.std() + 1e-8)
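# PPO clipped surrogate objective: maximize E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)]
# where r = pi_theta(a|s) / pi_theta_old(a|s); the ratio is computed in log space below.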
ratios = th.exp(log_probs - old_log_probs.detach())
surr1 = ratios * advantages
surr2 = th.clamp(ratios, 1 - ratio_clip_cnst, 1 + ratio_clip_cnst) * advantages
loss = -th.min(surr1, surr2).mean() - entropy_coef * entropy.mean()
loss = loss + value_coef * self.mse_loss(values, value_estimation)
return loss, values.mean(), entropy.mean()
def compute_advantage(self, gae_lam, dones, rewards, values, gamma):
# FIXME: Understand GAE fully and write this part
"""if gae_lam is not None:
dones = th.from_numpy(dones).float().to(self.device)
rewards = th.from_numpy(rewards).float().to(self.device)
advantages, value_estimation = single_worker_gae(values, dones, rewards, gamma, gae_lam, self.device)
else:"""
value_estimation = mc_reward_estimation(rewards, dones, gamma)
value_estimation = th.tensor(value_estimation).float().to(self.device)
advantages = value_estimation - values.detach()
return advantages, value_estimation
def __del__(self):
if self.enable_tensorboard:
self.tb_writer.close()
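# Hedged usage sketch (illustrative only). The PPO calls follow the interface defined above;
# the MLPActorCritic constructor arguments are an assumption, not taken from this file.
#
#     import gym
#     from auto_rl.learning.policy import MLPActorCritic
#
#     env = gym.make("CartPole-v1")
#     policy = MLPActorCritic(env.observation_space, env.action_space, device="cpu")  # assumed signature
#     agent = PPO(env, policy, device="cpu", log_dir="./runs/ppo_cartpole")
#     agent.learn(total_steps=100_000, rollout_steps=2048, lr=3e-4,
#                 optimize_epoch_num=10, batch_size=64, gamma=0.99, gae_lam=0.95,
#                 ratio_clip_cnst=0.2, entropy_coef=0.01, value_coef=0.5,
#                 grad_clip_cnst=0.5, eval_intv=10)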
|
the-stack_0_4730 | import turtle
# turtle object
t = turtle.Turtle()
t.pensize(6)
turtle.bgcolor("#5383C1")
t.speed(9)
# function for creation of eye
def eye(col, rad):
t.down()
t.fillcolor(col)
t.begin_fill()
t.circle(rad)
t.end_fill()
t.up()
# function for cheeks
def cheek():
t.down()
t.fillcolor("#D03D3D");
t.begin_fill()
t.circle(20)
t.end_fill()
t.up()
# draw face
t.fillcolor('yellow')
t.begin_fill()
t.circle(100)
t.end_fill()
t.up()
# draw eyes
t.goto(-40, 120)
eye('white', 10)
t.goto(-37, 125)
eye('black', 5)
t.goto(40, 120)
eye('white', 10)
t.goto(37, 125)
eye('black', 5)
# draw nose
t.goto(0, 75)
eye('black', 8)
#draw cheek
t.goto(-80, 80)
cheek()
t.goto(80, 80)
cheek()
# draw mouth
t.goto(-40, 85)
t.down()
t.right(90)
t.circle(20, 180)
t.up()
t.goto(0, 85)
t.down()
t.right(180)
t.circle(20, 180)
t.up()
# Drawing left Ear
t.goto(-67,180)
t.down()
t.left(58)
t.fillcolor('#C29349')
t.begin_fill()
t.circle(30, 180)
t.end_fill()
t.up()
# Drawing right ear
t.goto(85, 150)
t.down()
t.right(-73)
t.fillcolor('#C29349')
t.begin_fill()
t.circle(30, 180)
t.end_fill()
t.up()
# draw tongue
t.goto(-30, 65)
t.down()
t.right(-48)
t.fillcolor('white')
t.begin_fill()
t.circle(30, 180)
t.lt(90)
t.fd(60)
t.end_fill()
t.hideturtle()
turtle.done() |
the-stack_0_4731 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Zope Publisher-based FTP Server
This FTP server uses the Zope 3 Publisher to execute commands.
"""
import posixpath
from io import BytesIO
from zope.server.interfaces.ftp import IFileSystem
from zope.server.interfaces.ftp import IFileSystemAccess
from zope.server.ftp.server import FTPServer
from zope.publisher.publish import publish
from zope.interface import implementer
@implementer(IFileSystem)
class PublisherFileSystem(object):
"""Generic Publisher FileSystem implementation."""
def __init__(self, credentials, request_factory):
self.credentials = credentials
self.request_factory = request_factory
def type(self, path):
if path == '/':
return 'd'
return self._execute(path, 'type')
def readfile(self, path, outstream, start=0, end=None):
return self._execute(path, 'readfile',
outstream=outstream, start=start, end=end)
_name = None
for _name in ('names', 'ls'):
f = locals()[_name] = lambda self, path, filter=None, _name=_name: self._execute(
path,
_name,
split=False,
filter=filter)
f.__name__ = _name
for _name in ('lsinfo', 'mtime', 'size', 'mkdir', 'remove', 'rmdir'):
f = locals()[_name] = lambda self, path, _name=_name: self._execute(path, _name)
f.__name__ = _name
del _name
def rename(self, old, new):
'See IWriteFileSystem'
old = self._translate(old)
new = self._translate(new)
path0, old = posixpath.split(old)
path1, new = posixpath.split(new)
assert path0 == path1
return self._execute(path0, 'rename', split=False, old=old, new=new)
def writefile(self, path, instream, start=None, end=None, append=False):
'See IWriteFileSystem'
return self._execute(
path, 'writefile',
instream=instream, start=start, end=end, append=append)
def writable(self, path):
'See IWriteFileSystem'
return self._execute(path, 'writable')
def _execute(self, path, command, split=True, **kw):
env = {}
env.update(kw)
env['command'] = command
path = self._translate(path)
if split:
env['path'], env['name'] = posixpath.split(path)
else:
env['path'] = path
env['credentials'] = self.credentials
request = self.request_factory(BytesIO(b''), env)
# Note that publish() calls close() on request, which deletes the
# response from the request, so that we need to keep track of it.
# agroszer: 2008.feb.1.: currently the above seems not to be true
# request will KEEP the response on close()
# even more if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = publish(request)
return request.response.getResult()
def _translate(self, path):
# Normalize
path = posixpath.normpath(path)
if path.startswith('..'):
# Someone is trying to get lower than the permitted root.
# We just ignore it.
path = '/'
return path
class PublisherFTPServer(FTPServer):
"""Generic FTP Server"""
def __init__(self, request_factory, name, ip, port, *args, **kw):
fs_access = PublisherFileSystemAccess(request_factory)
super(PublisherFTPServer, self).__init__(ip, port, fs_access,
*args, **kw)
@implementer(IFileSystemAccess)
class PublisherFileSystemAccess(object):
def __init__(self, request_factory):
self.request_factory = request_factory
def authenticate(self, credentials):
# We can't actually do any authentication initially, as the
# user may not be defined at the root.
pass
def open(self, credentials):
return PublisherFileSystem(credentials, self.request_factory)
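# Hedged usage sketch (illustrative, not part of zope.server): wiring the server up.
# The request factory below is a placeholder name; in a Zope application it normally
# comes from the publication/request factory setup.
#
#     server = PublisherFTPServer(my_request_factory, 'FTP', '127.0.0.1', 8021)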
|
the-stack_0_4732 | from Child import Child
from Node import Node # noqa: I201
ATTRIBUTE_NODES = [
# token-list -> token? token-list?
Node('TokenList', kind='SyntaxCollection',
element='Token'),
# token-list -> token token-list?
Node('NonEmptyTokenList', kind='SyntaxCollection',
element='Token', omit_when_empty=True),
Node('CustomAttribute', kind='Syntax',
description='''
A custom `@` attribute.
''',
children=[
Child('AtSignToken', kind='AtSignToken',
description='The `@` sign.'),
Child('AttributeName', kind='Type', classification='Attribute',
description='The name of the attribute.'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='FunctionCallArgumentList',
collection_element_name='Argument', is_optional=True),
Child('RightParen', kind='RightParenToken',
is_optional=True),
]),
# attribute -> '@' identifier '('?
# ( identifier
# | string-literal
# | integer-literal
# | availability-spec-list
# | specialize-attr-spec-list
# | implements-attr-arguments
# | differentiable-attr-arguments
# | named-attribute-string-argument
# )? ')'?
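# Illustrative examples of source this node models (not exhaustive):
# `@available(iOS 10.0, *)`, `@objc(handleTap:)`, `@_specialize(where T == Int)`.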
Node('Attribute', kind='Syntax',
description='''
An `@` attribute.
''',
children=[
Child('AtSignToken', kind='AtSignToken',
description='The `@` sign.'),
Child('AttributeName', kind='Token', classification='Attribute',
description='The name of the attribute.'),
Child('LeftParen', kind='LeftParenToken', is_optional=True,
description='''
If the attribute takes arguments, the opening parenthesis.
'''),
Child('Argument', kind='Syntax', is_optional=True,
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('String', kind='StringLiteralToken'),
Child('Integer', kind='IntegerLiteralToken'),
Child('Availability', kind='AvailabilitySpecList'),
Child('SpecializeArguments',
kind='SpecializeAttributeSpecList'),
Child('ObjCName', kind='ObjCSelector'),
Child('ImplementsArguments',
kind='ImplementsAttributeArguments'),
# SWIFT_ENABLE_TENSORFLOW
Child('DifferentiableArguments',
kind='DifferentiableAttributeArguments'),
# SWIFT_ENABLE_TENSORFLOW
Child('DifferentiatingArguments',
kind='DifferentiatingAttributeArguments'),
# SWIFT_ENABLE_TENSORFLOW
Child('TransposingArguments',
kind='DifferentiatingAttributeArguments'),
Child('NamedAttributeString',
kind='NamedAttributeStringArgument'),
], description='''
The arguments of the attribute. In case the attribute \
takes multiple arguments, they are gathered in the \
appropriate structure first.
'''),
Child('RightParen', kind='RightParenToken', is_optional=True,
description='''
If the attribute takes arguments, the closing parenthesis.
'''),
# TokenList to gather remaining tokens of invalid attributes
# FIXME: Remove this recovery option entirely
Child('TokenList', kind='TokenList',
collection_element_name='Token', is_optional=True),
]),
# attribute-list -> attribute attribute-list?
Node('AttributeList', kind='SyntaxCollection',
element='Syntax', element_name='Attribute',
element_choices=[
'Attribute',
'CustomAttribute',
]),
# The argument of '@_specialize(...)'
# specialize-attr-spec-list -> labeled-specialize-entry
# specialize-spec-attr-list?
# | generic-where-clause
# specialize-spec-attr-list?
Node('SpecializeAttributeSpecList', kind='SyntaxCollection',
description='''
A collection of arguments for the `@_specialize` attribute
''',
element='Syntax', element_name='SpecializeAttribute',
element_choices=[
'LabeledSpecializeEntry',
'GenericWhereClause',
]),
# Representation of e.g. 'exported: true,'
# labeled-specialize-entry -> identifier ':' token ','?
Node('LabeledSpecializeEntry', kind='Syntax',
description='''
A labeled argument for the `@_specialize` attribute like \
`exported: true`
''',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating the label and the value'),
Child('Value', kind='Token',
description='The value for this argument'),
Child('TrailingComma', kind='CommaToken',
is_optional=True, description='''
A trailing comma if this argument is followed by another one
'''),
]),
# The argument of '@_dynamic_replacement(for:)' or '@_private(sourceFile:)'
# named-attribute-string-arg -> 'name': string-literal
Node('NamedAttributeStringArgument', kind='Syntax',
description='''
The argument for the `@_dynamic_replacement` or `@_private` \
attribute of the form `for: "function()"` or `sourceFile: \
"Src.swift"`
''',
children=[
Child('NameTok', kind='Token',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating the label and the value'),
Child('StringOrDeclname', kind='Syntax', node_choices=[
Child('String', kind='StringLiteralToken'),
Child('Declname', kind='DeclName'),
]),
]),
Node('DeclName', kind='Syntax', children=[
Child('DeclBaseName', kind='Syntax', description='''
The base name of the protocol\'s requirement.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('Operator', kind='PrefixOperatorToken'),
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the protocol\'s requirement if it \
is a function requirement.
'''),
]),
# The argument of '@_implements(...)'
# implements-attr-arguments -> simple-type-identifier ','
# (identifier | operator) decl-name-arguments
Node('ImplementsAttributeArguments', kind='Syntax',
description='''
The arguments for the `@_implements` attribute of the form \
`Type, methodName(arg1Label:arg2Label:)`
''',
children=[
Child('Type', kind='SimpleTypeIdentifier', description='''
The type for which the method with this attribute \
implements a requirement.
'''),
Child('Comma', kind='CommaToken',
description='''
The comma separating the type and method name
'''),
Child('DeclBaseName', kind='Syntax', description='''
The base name of the protocol\'s requirement.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('Operator', kind='PrefixOperatorToken'),
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the protocol\'s requirement if it \
is a function requirement.
'''),
]),
# SWIFT_ENABLE_TENSORFLOW
# The argument of '@differentiable(...)'.
# differentiable-attr-arguments ->
# differentiation-params-clause? ','?
# differentiable-attr-func-specifier? # primal
# differentiable-attr-func-specifier? # adjoint
# differentiable-attr-func-specifier? # jvp
# differentiable-attr-func-specifier? # vjp
# generic-where-clause?
# FIXME: There is currently no guarantee that 'MaybePrimal' is in fact
# the primal specifier, it could be any specifier. The current syntax
# definitions only ensure that there are between 0 and 4 function
# specifiers. A more robust definition would enforce that specific function
# specifiers appear only once, in order.
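# Illustrative example (SWIFT_ENABLE_TENSORFLOW-era syntax, hypothetical function names):
# `@differentiable(wrt: (x, y), jvp: fooJVP, vjp: fooVJP where T : Differentiable)`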
Node('DifferentiableAttributeArguments', kind='Syntax',
description='''
The arguments for the `@differentiable` attribute: an optional \
differentiation parameter list and associated functions.
''',
children=[
Child('DiffParams', kind='DifferentiationParamsClause',
is_optional=True),
Child('DiffParamsComma', kind='CommaToken', description='''
The comma following the differentiation parameters clause,
if it exists.
''', is_optional=True),
Child('MaybePrimal', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('MaybeAdjoint', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('MaybeJVP', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('MaybeVJP', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('WhereClause', kind='GenericWhereClause', is_optional=True),
]),
# differentiation-params-clause ->
# 'wrt' ':' (differentiation-param | differentiation-params)
Node('DifferentiationParamsClause', kind='Syntax',
description='A clause containing differentiation parameters.',
children=[
Child('WrtLabel', kind='IdentifierToken',
text_choices=['wrt'], description='The "wrt" label.'),
Child('Colon', kind='ColonToken', description='''
The colon separating "wrt" and the parameter list.
'''),
Child('Parameters', kind='Syntax',
node_choices=[
Child('Parameter', kind='DifferentiationParam'),
Child('ParameterList', kind='DifferentiationParams'),
]),
]),
# differentiation-params -> '(' differentiation-param-list ')'
Node('DifferentiationParams', kind='Syntax',
description='The differentiation parameters.',
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('DiffParams', kind='DifferentiationParamList',
collection_element_name='DifferentiationParam',
description='The parameters for differentiation.'),
Child('RightParen', kind='RightParenToken'),
]),
# differentiation-param-list ->
# differentiation-param differentiation-param-list?
Node('DifferentiationParamList', kind='SyntaxCollection',
element='DifferentiationParam'),
# differentiation-param -> ('self' | identifier) ','?
Node('DifferentiationParam', kind='Syntax',
description='''
A differentiation parameter: either the "self" identifier or a \
function parameter name.
''',
traits=['WithTrailingComma'],
children=[
Child('Parameter', kind='Syntax',
node_choices=[
Child('Self', kind='SelfToken'),
Child('Name', kind='IdentifierToken'),
]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# differentiable-attr-func-specifier ->
# ('jvp' | 'vjp') ':' func-decl-name ','?
Node('DifferentiableAttributeFuncSpecifier', kind='Syntax',
description='''
A function specifier, consisting of an identifier, colon, and a \
function declaration name (e.g. `vjp: foo(_:_:)`).
''',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
text_choices=['jvp', 'vjp']),
Child('Colon', kind='ColonToken'),
Child('FunctionDeclName', kind='FunctionDeclName',
description='The referenced function name.'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# func-decl-name -> (identifier | operator) decl-name-arguments?
# NOTE: This is duplicated with `DeclName` above. Change `DeclName`
# description and use it if possible.
Node('FunctionDeclName', kind='Syntax',
description='A function declaration name (e.g. `foo(_:_:)`).',
children=[
Child('Name', kind='Syntax', description='''
The base name of the referenced function.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('PrefixOperator', kind='PrefixOperatorToken'),
Child('SpacedBinaryOperator',
kind='SpacedBinaryOperatorToken'),
]),
Child('Arguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the referenced function, optionally \
specified.
'''),
]),
# SWIFT_ENABLE_TENSORFLOW
# The argument of '@differentiating(...)'.
# differentiating-attr-arguments ->
# func-decl-name ','? differentiable-attr-parameters?
Node('DifferentiatingAttributeArguments', kind='Syntax',
description='''
The arguments for the `@differentiating` attribute: the original
function and an optional differentiation parameter list.
''',
children=[
Child('Original', kind='FunctionDeclName',
description='The referenced original function.'),
Child('Comma', kind='CommaToken', is_optional=True),
Child('DiffParams', kind='DifferentiationParamsClause',
is_optional=True),
]),
# SWIFT_ENABLE_TENSORFLOW
# The argument of '@transposing(...)'.
# transposing-attr-arguments ->
# func-decl-name ','? differentiable-attr-parameters?
Node('TransposingAttributeArguments', kind='Syntax',
description='''
The arguments for the `@transposing` attribute: the original
function and an optional differentiation parameter list.
''',
children=[
Child('Original', kind='FunctionDeclName',
description='The referenced original function.'),
Child('Comma', kind='CommaToken', is_optional=True),
Child('DiffParams', kind='DifferentiationParamsClause',
is_optional=True),
]),
# objc-selector-piece -> identifier? ':'?
Node('ObjCSelectorPiece', kind='Syntax',
description='''
A piece of an Objective-C selector. Either consisting of just an \
identifier for a nullary selector, an identifier and a colon for a \
labeled argument, or just a colon for an unlabeled argument
''',
children=[
Child('Name', kind='IdentifierToken', is_optional=True),
Child('Colon', kind='ColonToken', is_optional=True),
]),
# objc-selector -> objc-selector-piece objc-selector?
Node('ObjCSelector', kind='SyntaxCollection', element='ObjCSelectorPiece')
]
|
the-stack_0_4733 | from protocolbuffers import UI_pb2
from careers.career_enums import CareerCategory, WORK_CAREER_CATEGORIES
from careers.career_ops import CareerTimeOffReason
from date_and_time import TimeSpan, DateAndTime
from distributor.shared_messages import build_icon_info_msg, IconInfoData
from drama_scheduler.drama_node import BaseDramaNode, DramaNodeUiDisplayType, DramaNodeRunOutcome
from drama_scheduler.drama_node_types import DramaNodeType
from holidays.holiday_globals import HolidayState, HolidayTuning
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import TunableReference, OptionalTunable
from sims4.utils import classproperty
from situations.bouncer.bouncer_types import RequestSpawningOption, BouncerRequestPriority
from situations.situation_guest_list import SituationGuestList, SituationGuestInfo
from situations.situation_types import SituationCallbackOption
from tunable_time import TunableTimeSpan
import alarms
import services
import sims4.log
import sims4.resources
logger = sims4.log.Logger('HolidayDramaNode', default_owner='nsavalani')
HOLIDAY_START_TIME_TOKEN = 'holiday_start_time_ticks'
HOLIDAY_END_TIME_TOKEN = 'holiday_end_time_ticks'
class HolidayDramaNode(BaseDramaNode):
INSTANCE_TUNABLES = {'pre_holiday_duration': TunableTimeSpan(description="\n This duration is used to calculate the drama node's start time for\n main holidays by subtracting the tuned amount from the globally \n tuned start time. The player is notified with a reminder for the\n holiday, and decorations will be put up in the neighborhood.\n For surprise holidays, this should be set to 0, as surprise \n holidays have no pre-holiday state.\n ", default_hours=23, locked_args={'days': 0, 'minutes': 0}), 'holiday': TunableReference(description='\n The holiday that this drama node starts.\n ', manager=services.get_instance_manager(sims4.resources.Types.HOLIDAY_DEFINITION))}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._holiday_alarm = None
self._state = None
self._situation_ids = []
self._holiday_end_time = None
self._active_household_id = None
self._holiday_start_time = None
@classproperty
def drama_node_type(cls):
return DramaNodeType.HOLIDAY
@classproperty
def persist_when_active(cls):
return True
@classproperty
def simless(cls):
return True
@property
def is_in_preholiday(self):
return self._state == HolidayState.PRE_DAY
@property
def is_running(self):
return self._state == HolidayState.RUNNING
@property
def holiday_id(self):
return self.holiday.guid64
@property
def day(self):
if self._holiday_start_time is not None:
return int(self._holiday_start_time.absolute_days())
actual_start_time = self._selected_time + self.pre_holiday_duration()
return int(actual_start_time.absolute_days())
def get_time_off_reason(self, sim_info, career_category, career_end_time):
holiday_service = services.holiday_service()
if self._state == HolidayState.SHUTDOWN or holiday_service is None:
return CareerTimeOffReason.NO_TIME_OFF
take_time_off = False
if career_category == CareerCategory.School:
take_time_off = holiday_service.get_holiday_time_off_school(self.holiday_id)
elif career_category in WORK_CAREER_CATEGORIES:
take_time_off = holiday_service.get_holiday_time_off_work(self.holiday_id)
elif career_category == CareerCategory.Volunteer or career_category == CareerCategory.UniversityCourse:
take_time_off = False
else:
logger.error('Unexpected CareerCategory {} when determining if a holiday should give Sims time off.', career_category)
if take_time_off and (self.is_running or self.get_calendar_start_time() < career_end_time):
return HolidayTuning.HOLIDAY_TIME_OFF_REASON
return CareerTimeOffReason.NO_TIME_OFF
def create_calendar_entry(self):
calendar_entry = super().create_calendar_entry()
active_household = services.active_household()
if active_household is not None:
holiday_service = services.holiday_service()
build_icon_info_msg(IconInfoData(icon_resource=holiday_service.get_holiday_display_icon(self.holiday_id)), holiday_service.get_holiday_display_name(self.holiday_id), calendar_entry.icon_info)
calendar_entry.holiday_id = self.holiday_id
for tradition in holiday_service.get_holiday_traditions(self.holiday_id):
calendar_entry.tradition_ids.append(tradition.guid64)
return calendar_entry
def create_calendar_alert(self):
if self.ui_display_type == DramaNodeUiDisplayType.POP_UP_HOLIDAY:
return
holiday_service = services.holiday_service()
calendar_alert = super().create_calendar_alert()
calendar_alert_description = holiday_service.get_holiday_calendar_alert_notification(self.holiday_id)
if calendar_alert_description is not None:
calendar_alert.description = calendar_alert_description(holiday_service.get_holiday_display_name(self.holiday_id))
build_icon_info_msg(IconInfoData(icon_resource=holiday_service.get_holiday_display_icon(self.holiday_id)), holiday_service.get_holiday_display_name(self.holiday_id), calendar_alert.calendar_icon)
for tradition in holiday_service.get_holiday_traditions(self.holiday_id):
calendar_alert.tradition_ids.append(tradition.guid64)
return calendar_alert
def get_calendar_start_time(self):
return self.selected_time.time_of_next_day_time(HolidayTuning.MAIN_HOLIDAY_START_TIME)
def get_calendar_end_time(self):
return self.get_calendar_start_time() + HolidayTuning.HOLIDAY_DURATION()
def _run_pre_holiday(self, from_load=False):
self._state = HolidayState.PRE_DAY
now = services.time_service().sim_now
time_to_holiday_start = now.time_till_next_day_time(HolidayTuning.MAIN_HOLIDAY_START_TIME)
self._holiday_start_time = now + time_to_holiday_start
self._holiday_alarm = alarms.add_alarm(self, time_to_holiday_start, lambda _: self._run_holiday())
active_household = services.active_household()
active_household.holiday_tracker.preactivate_holiday(self.holiday_id)
self._active_household_id = active_household.id
lot_decoration_service = services.lot_decoration_service()
if lot_decoration_service is not None:
lot_decoration_service.request_holiday_decorations(self, from_load=from_load)
def _on_holiday_situation_ended(self, situation_id, callback_option, _):
current_zone = services.current_zone()
if current_zone.is_zone_shutting_down:
return
self._unregister_situation_ended_callbacks()
self._end_holiday()
active_household = services.active_household()
if active_household is not None:
active_household.holiday_tracker.cancel_holiday(self.holiday_id)
def _register_situation_ended_callbacks(self):
situation_manager = services.get_zone_situation_manager()
for situation_id in self._situation_ids:
situation_manager.register_for_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._on_holiday_situation_ended)
def _unregister_situation_ended_callbacks(self):
situation_manager = services.get_zone_situation_manager()
for situation_id in self._situation_ids:
situation_manager.unregister_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._on_holiday_situation_ended)
def _run_holiday(self, from_load=False):
self._state = HolidayState.RUNNING
if not from_load:
self._holiday_end_time = services.time_service().sim_now + HolidayTuning.HOLIDAY_DURATION()
holiday_duration = HolidayTuning.HOLIDAY_DURATION()
else:
holiday_duration = self._holiday_end_time - services.time_service().sim_now
self._holiday_alarm = alarms.add_alarm(self, holiday_duration, self._holiday_end_callback)
active_household = services.active_household()
holiday_tracker = active_household.holiday_tracker
if holiday_tracker.is_holiday_cancelled(self.holiday_id):
return
holiday_tracker.activate_holiday(self.holiday_id, from_load=from_load)
self._active_household_id = active_household.id
lot_decoration_service = services.lot_decoration_service()
if lot_decoration_service is not None:
lot_decoration_service.request_holiday_decorations(self, from_load=from_load)
if from_load:
(situation_ids, sims_needing_situations) = holiday_tracker.load_holiday_situations(self.holiday_id)
self._situation_ids.extend(situation_ids)
if not sims_needing_situations:
self._register_situation_ended_callbacks()
return
else:
sims_needing_situations = [sim_info for sim_info in active_household.sim_infos if sim_info.is_human]
holiday_service = services.holiday_service()
holiday_goals = list(tradition.situation_goal for tradition in holiday_service.get_holiday_traditions(self.holiday_id))
for sim_info in sims_needing_situations:
situation_id = self._create_holiday_situation(sim_info, holiday_goals)
self._register_situation_ended_callbacks()
def on_sim_added(self, sim_info):
if self._state != HolidayState.RUNNING:
return
holiday_goals = list(tradition.situation_goal for tradition in services.holiday_service().get_holiday_traditions(self.holiday_id))
situation_id = self._create_holiday_situation(sim_info, holiday_goals)
if situation_id:
services.get_zone_situation_manager().register_for_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._on_holiday_situation_ended)
def _create_holiday_situation(self, sim_info, holiday_goals):
guest_list = SituationGuestList(invite_only=True, host_sim_id=sim_info.id)
guest_list.add_guest_info(SituationGuestInfo(sim_info.id, HolidayTuning.HOLIDAY_JOB, RequestSpawningOption.DONT_CARE, BouncerRequestPriority.EVENT_VIP))
situation_id = services.get_zone_situation_manager().create_situation(HolidayTuning.HOLIDAY_SITUATION, guest_list=guest_list, linked_sim_id=sim_info.id, dynamic_goals=holiday_goals)
if situation_id:
self._situation_ids.append(situation_id)
return situation_id
def _give_time_off_loot(self, sim_info, time_off_loot):
if sim_info is not None and time_off_loot is not None:
resolver = sim_info.get_resolver()
time_off_loot.apply_to_resolver(resolver)
def _end_holiday(self):
active_household = services.active_household()
if not active_household.holiday_tracker.is_holiday_cancelled(self.holiday_id):
self._unregister_situation_ended_callbacks()
for situation_id in self._situation_ids:
services.get_zone_situation_manager().destroy_situation_by_id(situation_id)
active_household.holiday_tracker.deactivate_holiday()
def _holiday_end_callback(self, _):
self._state = HolidayState.SHUTDOWN
self._unregister_situation_ended_callbacks()
self._end_holiday()
services.drama_scheduler_service().complete_node(self.uid)
def schedule(self, resolver, specific_time=None, time_modifier=TimeSpan.ZERO):
self._state = HolidayState.INITIALIZED
success = super().schedule(resolver, specific_time=specific_time, time_modifier=time_modifier)
if success:
services.calendar_service().mark_on_calendar(self, advance_notice_time=HolidayTuning.HOLIDAY_DURATION())
return success
def cleanup(self, from_service_stop=False):
if self._holiday_alarm is not None:
self._holiday_alarm.cancel()
self._holiday_alarm = None
services.calendar_service().remove_on_calendar(self.uid)
super().cleanup(from_service_stop=from_service_stop)
if self._state == HolidayState.PRE_DAY:
household = services.household_manager().get(self._active_household_id)
if household is not None:
household.holiday_tracker.deactivate_pre_holiday()
elif self._state == HolidayState.RUNNING:
household = services.household_manager().get(self._active_household_id)
if household is not None and not household.holiday_tracker.is_holiday_cancelled(self.holiday_id):
household.holiday_tracker.deactivate_holiday()
if self._state in (HolidayState.PRE_DAY, HolidayState.RUNNING, HolidayState.SHUTDOWN):
lot_decoration_service = services.lot_decoration_service()
if lot_decoration_service is not None:
lot_decoration_service.cancel_decoration_requests_for(self)
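# Scheduling note: for a specific_time, the node anchors to the next MAIN_HOLIDAY_START_TIME
# after that time and then shifts back by pre_holiday_duration; the result is clamped to just
# after "now" so the pre-holiday phase never starts in the past.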
def _select_time(self, specific_time=None, time_modifier=TimeSpan.ZERO):
if specific_time is None:
result = super()._select_time(time_modifier=time_modifier)
if not result:
return result
drama_scheduler_service = services.drama_scheduler_service()
for drama_node in drama_scheduler_service.scheduled_nodes_gen():
if drama_node.drama_node_type != DramaNodeType.HOLIDAY and drama_node.drama_node_type != DramaNodeType.PLAYER_PLANNED:
continue
if drama_node.day == self.day:
return False
return True
holiday_start_time = specific_time.time_of_next_day_time(HolidayTuning.MAIN_HOLIDAY_START_TIME)
now = services.time_service().sim_now
if holiday_start_time < now:
return False
selected_time = holiday_start_time + self.pre_holiday_duration()*-1
if selected_time < now:
selected_time = now + TimeSpan.ONE
self._selected_time = selected_time
return True
def _save_custom_data(self, writer):
if self._holiday_start_time is not None:
writer.write_uint64(HOLIDAY_START_TIME_TOKEN, self._holiday_start_time.absolute_ticks())
if self._holiday_end_time is not None:
writer.write_uint64(HOLIDAY_END_TIME_TOKEN, self._holiday_end_time.absolute_ticks())
def _load_custom_data(self, reader):
holiday_start_time_ticks = reader.read_uint64(HOLIDAY_START_TIME_TOKEN, None)
if holiday_start_time_ticks is not None:
self._holiday_start_time = DateAndTime(holiday_start_time_ticks)
holiday_end_time_ticks = reader.read_uint64(HOLIDAY_END_TIME_TOKEN, None)
if holiday_end_time_ticks is not None:
self._holiday_end_time = DateAndTime(holiday_end_time_ticks)
if self._holiday_start_time and not self._holiday_end_time and self._holiday_start_time + HolidayTuning.HOLIDAY_DURATION() < services.time_service().sim_now:
return False
return True
def resume(self):
now = services.time_service().sim_now
if now < self._holiday_start_time:
self._run_pre_holiday(from_load=True)
else:
self._run_holiday(from_load=True)
def _run(self):
if self.pre_holiday_duration().in_ticks() == 0:
self._run_holiday()
self._holiday_start_time = services.time_service().sim_now
else:
self._run_pre_holiday()
return DramaNodeRunOutcome.SUCCESS_NODE_INCOMPLETE
def load(self, drama_node_proto, schedule_alarm=True):
super_success = super().load(drama_node_proto, schedule_alarm=schedule_alarm)
if not super_success:
return False
services.calendar_service().mark_on_calendar(self, advance_notice_time=HolidayTuning.HOLIDAY_DURATION())
return True
lock_instance_tunables(HolidayDramaNode, ui_display_data=None)
HOLIDAY_ID_TOKEN = 'holiday_id'
class CustomHolidayDramaNode(HolidayDramaNode):
REMOVE_INSTANCE_TUNABLES = ('holiday',)
def __init__(self, *args, holiday_id=None, **kwargs):
super().__init__(*args, **kwargs)
self._holiday_id = holiday_id
@property
def holiday_id(self):
return self._holiday_id
def _save_custom_data(self, writer):
super()._save_custom_data(writer)
writer.write_uint64(HOLIDAY_ID_TOKEN, self._holiday_id)
def _load_custom_data(self, reader):
self._holiday_id = reader.read_uint64(HOLIDAY_ID_TOKEN, None)
return super()._load_custom_data(reader)
|
the-stack_0_4734 | import ray
from copy import deepcopy
from leaderboard.leaderboard_evaluator import LeaderboardEvaluator
from leaderboard.utils.statistics_manager import StatisticsManager
class ChallengeRunner():
def __init__(self, args, scenario, route, port=1000, tm_port=1002, debug=False):
args = deepcopy(args)
# Inject args
args.scenario_class = 'route_scenario'
args.port = port
args.trafficManagerPort = tm_port
args.scenarios = scenario
args.routes = route
args.debug = debug
args.record = ''
self.runner = LeaderboardEvaluator(args, StatisticsManager())
self.args = args
def run(self):
return self.runner.run(self.args)
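# Hedged usage sketch (illustrative). `args` is normally the argparse namespace used by the
# CARLA leaderboard entry point; the scenario/route file paths and ports below are hypothetical.
#
#     runner = ChallengeRunner(args, 'data/all_towns_traffic_scenarios.json',
#                              'data/routes_town05.xml', port=2000, tm_port=2002)
#     runner.run()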
|
the-stack_0_4736 | import os
import boto3
from base import LambdaFunctionBase
class CWScheduledEventManageEC2State(LambdaFunctionBase):
"""
Class that starts or stops EC2 instances that are not part of an Auto Scaling group.
"""
# Section specific to the lambda.
ACTION = os.environ['PARAM_ACTION']
RESOURCE_TAG_KEY = os.environ['PARAM_RESOURCE_TAG_KEY']
RESOURCE_TAG_VALUE = os.environ['PARAM_RESOURCE_TAG_VALUE']
AWS_REGIONS = os.environ['PARAM_AWS_REGIONS'].split(',')
def _get_ec2_instance_ids_by_tag(self, aws_region_name, instance_state, tag_key, tag_value):
""" Returns all resources identifiers linked to tag. """
ec2_client = boto3.client('ec2', region_name=aws_region_name)
autoscaling_client = boto3.client('autoscaling', region_name=aws_region_name)
# Finds EC2 instances.
resource_pages = ec2_client.get_paginator('describe_instances').paginate(
Filters=[
{
'Name': f'tag:{tag_key}',
'Values': [
tag_value
]
},
{
'Name': 'instance-state-name',
'Values': [
instance_state
]
}
]
)
# Browse EC2 instances and exclude EC2 member of a AutoScalingGroup.
ec2_instance_ids = []
for resource_page in resource_pages:
for resource in resource_page['Reservations']:
for ec2_instance in resource['Instances']:
ec2_instance_id = ec2_instance['InstanceId']
# Check if part of an autoscaling group.
is_part_of_autoscaling_group = len(autoscaling_client.describe_auto_scaling_instances(
InstanceIds=[
ec2_instance_id,
]
)['AutoScalingInstances']) > 0
# If not, the instance is eligible.
if not is_part_of_autoscaling_group:
self.logger.debug('>> Instance %s is eligible.', ec2_instance_id)
ec2_instance_ids.append(ec2_instance_id)
else:
self.logger.debug('>> Instance %s is not eligible as part of an AutoScaling Group.', ec2_instance_id)
return ec2_instance_ids
def _stop_ec2_instances(self, aws_region_name, ec2_instance_ids):
""" Stop the EC2 instances. """
ec2_client = boto3.client('ec2', region_name=aws_region_name)
self.logger.info('> Stopping EC2 instances.')
for ec2_instance_id in ec2_instance_ids:
self.logger.debug('>> Stopping instance %s.', ec2_instance_id)
ec2_client.stop_instances(InstanceIds=[ec2_instance_id])
self.logger.info('>> EC2 Instance %s => [STOPPED].', ec2_instance_id)
def _start_ec2_instances(self, aws_region_name, ec2_instance_ids):
""" Start the EC2 instances. """
ec2_client = boto3.client('ec2', region_name=aws_region_name)
self.logger.info('> Starting EC2 instances.')
for ec2_instance_id in ec2_instance_ids:
self.logger.debug('>> Starting instance %s.', ec2_instance_id)
ec2_client.start_instances(InstanceIds=[ec2_instance_id])
self.logger.info('>> EC2 Instance %s => [RUNNING].', ec2_instance_id)
def _execute(self, event, context): # pylint: disable=W0613
""" Execute the method. """
self.logger.info('Starting the operation.')
if self.ACTION in ['enable', 'start']:
ec2_instance_state = 'stopped'
elif self.ACTION in ['disable', 'stop']:
ec2_instance_state = 'running'
else:
raise Exception('Unexpected action.')
for aws_region_name in self.AWS_REGIONS:
self.logger.info('> Searching EC2 instances in region %s having tag %s=%s and state=%s.',
aws_region_name, self.RESOURCE_TAG_KEY, self.RESOURCE_TAG_VALUE, ec2_instance_state)
# Get EC2 by tag.
ec2_instance_ids = self._get_ec2_instance_ids_by_tag(aws_region_name, ec2_instance_state, self.RESOURCE_TAG_KEY, self.RESOURCE_TAG_VALUE)
self.logger.info('> Found %s EC2 instances in region %s having tag %s=%s and state=%s.',
str(len(ec2_instance_ids)), aws_region_name, self.RESOURCE_TAG_KEY, self.RESOURCE_TAG_VALUE, ec2_instance_state)
# Start/Stop
if len(ec2_instance_ids) > 0:
if self.ACTION in ['enable', 'start']:
self._start_ec2_instances(aws_region_name, ec2_instance_ids)
elif self.ACTION in ['disable', 'stop']:
self._stop_ec2_instances(aws_region_name, ec2_instance_ids)
self.logger.info('Operation completed successfully.')
return self._build_response_ok()
def lambda_handler(event, context):
""" Function invoked by AWS. """
return CWScheduledEventManageEC2State().process_event(event, context)
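# Hedged deployment note (illustrative values). The environment variables are read at import
# time, so they must be set on the Lambda function before this module loads, e.g.:
#   PARAM_ACTION=stop
#   PARAM_RESOURCE_TAG_KEY=scheduler:managed
#   PARAM_RESOURCE_TAG_VALUE=true
#   PARAM_AWS_REGIONS=eu-west-1,us-east-1
# A CloudWatch/EventBridge scheduled rule then invokes lambda_handler on a cron schedule.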
|
the-stack_0_4737 | # -*- coding: utf-8 -*-
task = Task("MainStory69",desc = u"自动主线69",pretask = ["MainStory15"])
#task.addSetupActionSet("RefreshGame",tag="pre1",desc="RefreshGame")
#task.addTeardownActionSet("TypeCommand",tag= "end1",desc = u"清除所有任务",mp = {'command':'$cleartask 1'})
#task.addTeardownActionSet("TypeCommand",tag= "end2",desc = u"重置等级为1级",mp ={'command':'$r who.Base.m_Grade = 1;who.Base.m_Exp=0;who.CalculateProp();who.SendPropChange();'})
#task.addTeardownActionSet("TypeCommand",tag= "end3",desc = u"添加主线任务",mp = {'command':'$task 10000'})
tasksuit.addTask(task)
step = Step("step0.5",u"预处理")
task.addStep(step)
#step.addActionSet("InputNamePsd",tag = "login",desc = "login input username and password", mp={'username':'autotest1','password':'123456'})
#step.addActionSet("TypeCommand",tag = "0.5",desc = u"获得万劫不复技能", mp = {'command':'$setskill 2099 1'})
#action 0.1
arg = { "detectRegion": gl.AREA_ICON_MAP ,"imagePattern" : "icon_map.png",
"clk_region" : GetRegionFromGrid(16) ,"clk_ptn" : Location(GetRegionFromGrid(16).getX()+40, GetRegionFromGrid(16).getY()+40),
"failResponse" : "Fail"}
act = ClickAction(tag = "0.1",desc=u"点击人物角色信息", **arg)
step.addAction(act)
arg = { "detectRegion": GetRegionFromGrid(140,141) ,"imagePattern" : "levelupbutton.png",
"failResponse" : "Fail"}
act = ClickAction(tag = "0.2",desc=u"点击升级按钮", **arg)
step.addAction(act)
arg = { "detectRegion": GetRegionFromGrid(89,91) ,"imagePattern" : "btn_queren.png",
"failResponse" : "Fail"}
act = ClickAction(tag = "0.3",desc=u"点击确认按钮", **arg)
step.addAction(act)
arg = { "time" : 1}
act = SleepAction(tag = "0.4",desc=u"wait 1s", **arg)
step.addAction(act)
# Note the close button's similarity threshold: the smaller the image, the more it must be tested with the Sikuli IDE matcher to avoid false matches
arg = { "detectRegion": GetRegionFromGrid(13,30) ,"imagePattern" : Pattern("btn_close_welfare_center.png").similar(0.80),
"failResponse" : "Fail"}
act = ClickAction(tag = "0.41",desc=u"关闭人物信息", **arg)
step.addAction(act)
arg = { "time" : 1}
act = SleepAction(tag = "0.42",desc=u"wait 1s", **arg)
step.addAction(act)
step.addActionSet("TypeCommand",tag = "0.5",desc = u"升一级", mp = {'command':'$addexp 1000000000'})
step.addActionSet("TypeCommand",tag = "0.6",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.7",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.9",desc = u"升一级", mp = {'command':'$r who.Base.m_Grade = 92;who.Base.m_Exp=0;who.CalculateProp();who.SendPropChange();'})
#step1
step = Step("step1",u"主循环")
task.addStep(step)
#action1.7
arg = { "detectRegion": GetRegionFromGrid(13,30) ,"imagePattern" : Pattern("btn_close_welfare_center.png").similar(0.80),
"failResponse" : "Ignore" ,"loopWaitingTime": 0 }
act = ClickAction(tag = "1.7",desc=u"关闭人物信息(防止误操作)", **arg)
step.addAction(act)
#act2
arg = { "detectRegion": gl.AREA_BTN_USEITEM ,"imagePattern" : "btn_equip.png",
"loopWaitingTime": 0 ,"successNext" : ["step","step2"],
"failResponse" : "Ignore" ,"loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "2", desc = u"是否有装备窗口", **arg)
step.addAction(act)
#act3
arg = { "detectRegion": gl.AREA_BTN_USEITEM ,"imagePattern" : "btn_useitem.png",
"loopWaitingTime": 0 ,"successNext" : ["step","step2"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "3", desc = u"是否有使用道具窗口", **arg)
step.addAction(act)
#act4
arg = { "detectRegion": gl.AREA_BTN_SKIPSTORY ,"imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
"loopWaitingTime": 0 ,"successNext" : ["step","step3"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "4", desc = u"是否在剧情或对话中", **arg)
step.addAction(act)
#act5
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime": 0 ,"successNext" : ["step","step4"],
"failResponse" : "Ignore","loopSleepTime" : 0.1, "failNext" : ["step","step1"],
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "5", desc = u"任务栏是否有主线", **arg)
step.addAction(act)
#Step2
step = Step("step2",u"处理道具装备")
task.addStep(step)
#action1
arg = { "detectRegion" : gl.AREA_BTN_USEITEM, "imagePattern" : "btn_useitem.PNG",
"loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
"loopRegion": gl.AREA_BTN_USEITEM ,"loopPattern" :"btn_useitem.PNG",
"loopTime" : 5 ,"loopType" : 0 ,
"loopSleepTime" : 0.1 ,"saveImage" : True}
act = ClickAction(tag = "1", desc = u"使用道具", **arg)
step.addAction(act)
#action2
arg = { "detectRegion" : gl.AREA_BTN_USEITEM, "imagePattern" : "btn_equip.PNG",
"loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
"loopRegion": gl.AREA_BTN_USEITEM ,"loopPattern" :"btn_equip.PNG",
"loopSleepTime" : 0.1 ,"saveImage" : True,
"loopTime" : 5 ,"loopType" : 0 }
act = ClickAction(tag = "2", desc = u"使用装备", **arg)
step.addAction(act)
#action3
act = Jump(tag = "3", desc = u"返回主循环", target = ["step","step1"])
step.addAction(act)
#Step3
step = Step("step3", desc = u"处理剧情中")
task.addStep(step)
#action1
arg = { "detectRegion": gl.AREA_BTN_SKIPSTORY ,"imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
"loopWaitingTime": 0 ,"failNext" : ["step","step1"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "1", desc = u"是否在剧情或对话中,不在则返回主循环Step1", **arg)
step.addAction(act)
#action2
arg = { "detectRegion": GetRegionFromGrid(76, 112) ,"imagePattern" : "enterbattle.png",
"loopWaitingTime": 0 ,"next" : ["step","step6"],
"failResponse" : "Ignore" ,"saveImage" : True,
"loopSleepTime" : 0.1}
act = ClickAction(tag = "2", desc = u"如果有开始战斗则点击直到消失,并进入战斗Step", **arg)
step.addAction(act)
#action3
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime": 0 ,"next" : ["step","step4"],
"failResponse" : "Ignore","loopSleepTime" : 0.1 ,"saveImage" : True,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "3", desc = u"如果有主线则点主线", **arg)
step.addAction(act)
#action 4
arg = { "detectRegion" : gl.AREA_BTN_SKIPSTORY, "imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
"loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
"loopSleepTime" : 0.3, "saveImage" : False,
"loopRegion": gl.AREA_BTN_SKIPSTORY ,"loopPattern" :Pattern("btn_skip_story.png").similar(0.60),
"loopTime" : 8 ,"loopType" : 0 }
act = ClickAction(tag = "4",desc=u"点击跳过", **arg)
step.addAction(act)
#action 5
arg = { "time":1}
act = SleepAction(tag = "5",desc=u"sleep 1s", **arg)
step.addAction(act)
#action 6
act = Jump(tag = "6", desc = u"返回继续处理剧情", target = ["action","1"])
step.addAction(act)
#Step4
step = Step("step4",u"处理剧情追踪")
task.addStep(step)
#act0.5
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime": 0 ,"saveImage" : True,
"failNext" : ["step","step3"] ,"failResponse" : "Ignore",
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "0.5", desc = u"任务栏是否有主线", **arg)
step.addAction(act)
#action0.6
arg = { "detectRegion": GetRegionFromGrid(60, 112) ,"imagePattern" : "special_zhuxian.png",
"loopWaitingTime": 0 ,
"failResponse" : "Ignore"}
act = ClickAction(tag = "0.6", desc = u"特殊主线", **arg)
step.addAction(act)
#act1
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("jingqingqidai.png").similar(0.80),
"loopWaitingTime": 0 ,"successNext" : ["step","step7"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "1", desc = u"主线任务为敬请期待则结束任务完成", **arg)
step.addAction(act)
#action2
arg = { "detectRegion" : GetRegionFromGrid(45, 128), "imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime" : 1 , "failResponse" : "Ignore" ,
"loopSleepTime" : 0.1, "saveImage" : False,
"loopRegion": GetRegionFromGrid(45, 128) ,"loopPattern" :Pattern("main_story.png").similar(0.60),
"loopTime" : 5 ,"loopType" : 0 }
act = ClickAction(tag = "2", desc = u"循环点击主线直到消失", **arg)
step.addAction(act)
#action3
act = Jump(tag = "3", desc = u"jump to step4 action1",target=["action","0.5"])
step.addAction(act)
#Step6
step = Step("step6",u"自动战斗")
task.addStep(step)
step.addActionSet("AutoBattle",tag = "1",desc = u"自动战斗actionset", mp = {})
act = Jump(tag = "1", desc = u"jump to step1", target=['step','step1'])
step.addAction(act)
#Step7
step = Step("step7",u"结束Task")
task.addStep(step)
arg = {'time':1}
act = SleepAction(tag="end sleep",desc = u"准备结束该任务",**arg)
step.addAction(act) |
the-stack_0_4738 | #!/usr/bin/env python3
import json
import os
import sys
import requests
base_url = "https://api.northwell.edu/"
url = "https://api.northwell.edu/v2/vax-locations/all"
def get_paginated_urls():
response = requests.get(url)
data = response.json()
return [page_url["url"] for page_url in data["response"]["pagination"]["display"]]
def get_locations(page_url):
response = requests.get(base_url + page_url)
data = response.json()
return data["response"]["locations"]
def main():
    output_dir = sys.argv[1] if len(sys.argv) > 1 else None
    if output_dir is None:
        raise Exception("Must pass an output_dir as first argument")
page_urls = get_paginated_urls()
for index, page_url in enumerate(page_urls):
locations = get_locations(page_url)
output_file_path = os.path.join(output_dir, f"output{index}.json")
with open(output_file_path, "w", encoding="utf-8") as f:
json.dump(locations, f, ensure_ascii=False, indent=4)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_4741 | #!/usr/bin/env python
import argparse
import boto3
import pandas as pd
import sagemaker
import json
from sagemaker.deserializers import JSONDeserializer
from sagemaker.serializers import JSONSerializer
from botocore.exceptions import ClientError
import logging
import traceback
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
if __name__=='__main__':
parser = argparse.ArgumentParser()
# parameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--ticker-cik", type=str, default='amzn')
parser.add_argument("--endpoint-name", type=str)
parser.add_argument("--region", type=str)
args, _ = parser.parse_known_args()
sagemaker_session = sagemaker.Session(boto3.session.Session(region_name=args.region))
#get the json data
    # returns JSON object as a dictionary
    with open(f'/opt/ml/processing/input/10k10q/{args.ticker_cik}_10k_10q_summary.json') as f:
        sec_summary = json.load(f)
sec_summary['inputs'] = sec_summary['inputs'][:2500]
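    # Truncate the summary text to the first 2500 characters/elements,
    # presumably to keep the request within the endpoint's input size limit.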
sec_summary['source'] = f'{args.ticker_cik} SEC Report'
sec_df = pd.json_normalize(sec_summary)
sec_df = sec_df[['source', 'inputs']]
articles_df = pd.read_csv(f'/opt/ml/processing/input/articles/{args.ticker_cik}_articles.csv')
articles_df = articles_df[['source.name', 'content', 'description']]
articles_df['inputs'] = articles_df[['content', 'description']].apply(lambda x: ''.join(x), axis=1)
articles_df.drop(['content', 'description'], axis=1, inplace=True)
articles_df.rename(columns={'source.name': 'source'}, inplace=True)
df = sec_df.append(articles_df,ignore_index=True)
data={}
data['inputs'] = df['inputs'].tolist()
#initialize predictor from Endpoint
predictor = sagemaker.predictor.Predictor(endpoint_name=args.endpoint_name,
sagemaker_session=sagemaker_session,
serializer=JSONSerializer(),
deserializer=JSONDeserializer())
# predict for all chunks
try:
response = predictor.predict(data)
response_df = pd.json_normalize(response)
response_df['source'] = df['source']
response_df=response_df[['source', 'label', 'score']]
response_df.to_csv(f'/opt/ml/processing/output/{args.ticker_cik}_sentiment_result.csv', index=False)
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message) |
the-stack_0_4744 | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides ``kedro.config`` with the functionality to load one
or more configuration files from specified paths.
"""
import logging
from glob import iglob
from pathlib import Path
from typing import AbstractSet, Any, Dict, Iterable, List, Set, Union
from warnings import warn
SUPPORTED_EXTENSIONS = [
".yml",
".yaml",
".json",
".ini",
".pickle",
".properties",
".xml",
]
class MissingConfigException(Exception):
"""Raised when no configuration files can be found within a config path"""
pass
class BadConfigException(Exception):
"""Raised when a configuration file cannot be loaded, for instance
due to wrong syntax or poor formatting.
"""
pass
class ConfigLoader:
"""Recursively scan the directories specified in ``conf_paths`` for
configuration files with a ``yaml``, ``yml``, ``json``, ``ini``,
``pickle``, ``xml`` or ``properties`` extension, load them,
and return them in the form of a config dictionary.
When the same top-level key appears in any 2 config files located in
the same ``conf_path`` (sub)directory, a ``ValueError`` is raised.
When the same key appears in any 2 config files located in different
``conf_path`` directories, the last processed config path takes
precedence and overrides this key.
For example, if your ``conf_path`` looks like this:
::
.
`-- conf
|-- README.md
|-- base
| |-- catalog.yml
| |-- logging.yml
| `-- experiment1
| `-- parameters.yml
`-- local
|-- catalog.yml
|-- db.ini
|-- experiment1
| |-- parameters.yml
| `-- model_parameters.yml
`-- experiment2
`-- parameters.yml
You can access the different configurations as follows:
::
>>> import logging.config
>>> from kedro.config import ConfigLoader
>>>
>>> conf_paths = ['conf/base', 'conf/local']
>>> conf_loader = ConfigLoader(conf_paths)
>>>
>>> conf_logging = conf_loader.get('logging*')
>>> logging.config.dictConfig(conf_logging) # set logging conf
>>>
>>> conf_catalog = conf_loader.get('catalog*', 'catalog*/**')
>>> conf_params = conf_loader.get('**/parameters.yml')
"""
def __init__(self, conf_paths: Union[str, Iterable[str]]):
"""Instantiate a ConfigLoader.
Args:
conf_paths: Non-empty path or list of paths to configuration
directories.
Raises:
ValueError: If ``conf_paths`` is empty.
"""
if not conf_paths:
raise ValueError(
"`conf_paths` must contain at least one path to "
"load configuration files from."
)
if isinstance(conf_paths, str):
conf_paths = [conf_paths]
self.conf_paths = _remove_duplicates(conf_paths)
self.logger = logging.getLogger(__name__)
@staticmethod
def _load_config_file(config_file: Path) -> Dict[str, Any]:
"""Load an individual config file using `anyconfig` as a backend.
Args:
config_file: Path to a config file to process.
Raises:
BadConfigException: If configuration is poorly formatted and
cannot be loaded.
Returns:
Parsed configuration.
"""
# for performance reasons
import anyconfig # pylint: disable=import-outside-toplevel
try:
# Default to UTF-8, which is Python 3 default encoding, to decode the file
with open(config_file, encoding="utf8") as yml:
return {
k: v
for k, v in anyconfig.load(yml).items()
if not k.startswith("_")
}
except AttributeError as exc:
raise BadConfigException(
f"Couldn't load config file: {config_file}"
) from exc
def _load_configs(self, config_filepaths: List[Path]) -> Dict[str, Any]:
"""Recursively load all configuration files, which satisfy
a given list of glob patterns from a specific path.
Args:
config_filepaths: Configuration files sorted in the order of precedence.
Raises:
ValueError: If 2 or more configuration files contain the same key(s).
BadConfigException: If configuration is poorly formatted and
cannot be loaded.
Returns:
Resulting configuration dictionary.
"""
aggregate_config = {}
seen_file_to_keys = {} # type: Dict[Path, AbstractSet[str]]
for config_filepath in config_filepaths:
single_config = self._load_config_file(config_filepath)
_check_duplicate_keys(seen_file_to_keys, config_filepath, single_config)
seen_file_to_keys[config_filepath] = single_config.keys()
aggregate_config.update(single_config)
return aggregate_config
def _lookup_config_filepaths(
self, conf_path: Path, patterns: Iterable[str], processed_files: Set[Path]
) -> List[Path]:
config_files = _path_lookup(conf_path, patterns)
seen_files = config_files & processed_files
if seen_files:
self.logger.warning(
"Config file(s): %s already processed, skipping loading...",
", ".join(str(seen) for seen in sorted(seen_files)),
)
config_files -= seen_files
return sorted(config_files)
def get(self, *patterns: str) -> Dict[str, Any]:
"""Recursively scan for configuration files, load and merge them, and
return them in the form of a config dictionary.
Args:
patterns: Glob patterns to match. Files, which names match
any of the specified patterns, will be processed.
Raises:
ValueError: If 2 or more configuration files inside the same
config path (or its subdirectories) contain the same
top-level key.
MissingConfigException: If no configuration files exist within
a specified config path.
BadConfigException: If configuration is poorly formatted and
cannot be loaded.
Returns:
Dict[str, Any]: A Python dictionary with the combined
configuration from all configuration files. **Note:** any keys
that start with `_` will be ignored.
"""
if not patterns:
raise ValueError(
"`patterns` must contain at least one glob "
"pattern to match config filenames against."
)
config = {} # type: Dict[str, Any]
processed_files = set() # type: Set[Path]
for conf_path in self.conf_paths:
if not Path(conf_path).is_dir():
raise ValueError(
f"Given configuration path either does not exist "
f"or is not a valid directory: {conf_path}"
)
logging.info("IN GET. STARTGIN __LOOKUP_CONFIG_FILE_PATHS")
config_filepaths = self._lookup_config_filepaths(
Path(conf_path), patterns, processed_files
)
logging.info("COMPLETED _LOOKUP_CONFIG_FILE_PATHS. STARTING _LOAD_CONFIGS.")
new_conf = self._load_configs(config_filepaths)
logging.info("COMPLETED _LOAD_CONFIGS.")
common_keys = config.keys() & new_conf.keys()
if common_keys:
sorted_keys = ", ".join(sorted(common_keys))
msg = (
"Config from path `%s` will override the following "
"existing top-level config keys: %s"
)
self.logger.info(msg, conf_path, sorted_keys)
config.update(new_conf)
processed_files |= set(config_filepaths)
logging.info("COMPLETED CONFIG UPDATE")
if not processed_files:
raise MissingConfigException(
f"No files found in {self.conf_paths} matching the glob "
f"pattern(s): {list(patterns)}"
)
return config
def _check_duplicate_keys(
processed_files: Dict[Path, AbstractSet[str]], filepath: Path, conf: Dict[str, Any]
) -> None:
duplicates = []
for processed_file, keys in processed_files.items():
overlapping_keys = conf.keys() & keys
if overlapping_keys:
sorted_keys = ", ".join(sorted(overlapping_keys))
if len(sorted_keys) > 100:
sorted_keys = sorted_keys[:100] + "..."
duplicates.append(f"{processed_file}: {sorted_keys}")
if duplicates:
dup_str = "\n- ".join(duplicates)
raise ValueError(f"Duplicate keys found in {filepath} and:\n- {dup_str}")
def _path_lookup(conf_path: Path, patterns: Iterable[str]) -> Set[Path]:
"""Return a set of all configuration files from ``conf_path`` or
its subdirectories, which satisfy a given list of glob patterns.
Args:
conf_path: Path to configuration directory.
patterns: List of glob patterns to match the filenames against.
Returns:
A set of paths to configuration files.
"""
config_files = set()
conf_path = conf_path.resolve()
for pattern in patterns:
# `Path.glob()` ignores the files if pattern ends with "**",
# therefore iglob is used instead
for each in iglob(str(conf_path / pattern), recursive=True):
path = Path(each).resolve()
if path.is_file() and path.suffix in SUPPORTED_EXTENSIONS:
config_files.add(path)
return config_files
def _remove_duplicates(items: Iterable[str]):
"""Remove duplicates while preserving the order."""
unique_items = [] # type: List[str]
for item in items:
if item not in unique_items:
unique_items.append(item)
else:
warn(
f"Duplicate environment detected! "
f"Skipping re-loading from configuration path: {item}"
)
return unique_items
|
the-stack_0_4745 | from django.views.generic import ListView
from constance import config
from learning.models import Tutorial, Category
from learning.filters import TutorialArchiveFilterSet
class TutorialListView(ListView):
model = Tutorial
template_name = "learning/tutorials_archive.html"
page_kwarg = "page"
context_object_name = "tutorials"
def get_paginate_by(self, queryset):
return config.LEARNING_TUTORIAL_ARCHIVE_PAGINATE_BY
def get_queryset(self):
# All confirmed and active tutorials
tutorials = (
Tutorial.objects.order_by("-create_date")
.only_main_fields()
.active_and_confirmed_tutorials()
)
        # Filter and order tutorials, then annotate comments_count
tutorials = TutorialArchiveFilterSet(
self.request.GET, tutorials
).qs.annonate_comments_count()
return tutorials
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_slug = self.request.GET.get("category")
if category_slug:
context["category"] = (
Category.objects.active_categories()
.filter(slug=category_slug)
.first()
)
return context
|
the-stack_0_4746 | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
# test API provided through BaseSktimeForecaster
"""SKtime Forecasters Test."""
__author__ = ["@mloning"]
__all__ = [
"test_different_fh_in_fit_and_predict_req",
"test_fh_in_fit_opt",
"test_fh_in_fit_req",
"test_fh_in_predict_opt",
"test_no_fh_in_fit_req",
"test_no_fh_opt",
"test_oh_setting",
"test_same_fh_in_fit_and_predict_opt",
"test_same_fh_in_fit_and_predict_req",
]
import numpy as np
import pytest
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._sktime import _BaseWindowForecaster
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.registry import all_estimators
from sktime.utils._testing.forecasting import _get_n_columns, make_forecasting_problem
from sktime.utils._testing.series import _make_series
# get all forecasters
FORECASTERS = [
forecaster
for (name, forecaster) in all_estimators(estimator_types="forecaster")
if issubclass(forecaster, BaseForecaster)
]
FH0 = 1
WINDOW_FORECASTERS = [
forecaster
for (name, forecaster) in all_estimators(estimator_types="forecaster")
if issubclass(forecaster, _BaseWindowForecaster)
]
# testing data
y = make_forecasting_problem()
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# test _y setting
@pytest.mark.parametrize("Forecaster", FORECASTERS)
def test_oh_setting(Forecaster):
"""Check cuttoff and _y."""
# check _y and cutoff is None after construction
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
assert f._y is None
assert f.cutoff is None
# check that _y and cutoff is updated during fit
f.fit(y_train, fh=FH0)
# assert isinstance(f._y, pd.Series)
# action:uncomments the line above
# why: fails for multivariates cause they are DataFrames
# solution: look for a general solution for Series and DataFrames
assert len(f._y) > 0
assert f.cutoff == y_train.index[-1]
# check data pointers
np.testing.assert_array_equal(f._y.index, y_train.index)
# check that _y and cutoff is updated during update
f.update(y_test, update_params=False)
np.testing.assert_array_equal(
f._y.index, np.append(y_train.index, y_test.index)
)
assert f.cutoff == y_test.index[-1]
# check setting/getting API for forecasting horizon
# divide Forecasters into groups based on when fh is required
FORECASTERS_REQUIRED = [
f for f in FORECASTERS if f.get_class_tag("requires-fh-in-fit", True)
]
FORECASTERS_OPTIONAL = [
f for f in FORECASTERS if not f.get_class_tag("requires-fh-in-fit", True)
]
# testing Forecasters which require fh during fitting
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_no_fh_in_fit_req(Forecaster):
"""Check if fh is required in fit."""
f = Forecaster.create_test_instance()
# fh required in fit, raises error if not passed
with pytest.raises(ValueError):
f.fit(y_train)
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_fh_in_fit_req(Forecaster):
"""Checks if fh is requred in fit."""
f = Forecaster.create_test_instance()
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict()
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_same_fh_in_fit_and_predict_req(Forecaster):
"""Check if fh is the same in fit and predict."""
f = Forecaster.create_test_instance()
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_different_fh_in_fit_and_predict_req(Forecaster):
"""Check if fh is different in fit and predict."""
f = Forecaster.create_test_instance()
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
# updating fh during predict raises error as fitted model depends on fh
# seen in fit
with pytest.raises(ValueError):
f.predict(fh=FH0 + 1)
# testing Forecasters which take fh either during fitting or predicting
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_no_fh_opt(Forecaster):
"""Check if fh is optional in fit."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
f.fit(y_train)
# not passing fh to either fit or predict raises error
with pytest.raises(ValueError):
f.predict()
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_fh_in_fit_opt(Forecaster):
"""Check if fh is optional in fit."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict()
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_fh_in_predict_opt(Forecaster):
"""Check if fh is optional in predict."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
f.fit(y_train)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_same_fh_in_fit_and_predict_opt(Forecaster):
"""Check if fh is the same in fit and predict."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
# passing the same fh to both fit and predict works
f.fit(y_train, fh=FH0)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", WINDOW_FORECASTERS)
def test_last_window(Forecaster):
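    """Check that the forecaster's last window matches the tail of the training series."""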
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
# passing the same fh to both fit and predict works
f.fit(y_train, fh=FH0)
actual, _ = f._get_last_window()
expected = y_train.iloc[-f.window_length_ :]
np.testing.assert_array_equal(actual, expected)
assert len(actual) == f.window_length_
|
the-stack_0_4747 | import numpy as np
from autoencirt.irt.grm import GRModel
from bayesianquilts.dense import DenseHorseshoe
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.mcmc.transformed_kernel import (
make_transform_fn, make_transformed_log_prob, make_log_det_jacobian_fn)
from tensorflow_probability.python.bijectors import softplus as softplus_lib
from bayesianquilts.util import (
clip_gradients
)
tfd = tfp.distributions
tfb = tfp.bijectors
class AEGRModel(GRModel):
def __init__(self,
auxiliary_parameterization=True,
xi_scale=1e-2,
eta_scale=1e-2,
kappa_scale=1e-2,
weight_exponent=1.0,
dim=2,
decay=0.25,
positive_discriminations=True,
hidden_layers=[100, 100],
num_items=1,
):
super(AEGRModel, self).__init__(
auxiliary_parameterization=True,
xi_scale=xi_scale,
eta_scale=eta_scale,
kappa_scale=kappa_scale,
weight_exponent=weight_exponent,
dim=dim,
decay=decay,
positive_discriminations=positive_discriminations
)
        self.num_items = num_items
self.hidden_layers = hidden_layers
self.grm_vars = self.var_list
def initialize_nn(self, hidden_layers=None):
if hidden_layers is not None:
self.hidden_layers = hidden_layers
else:
hidden_layers = self.hidden_layers
self.nn = DenseHorseshoe(
self.num_items,
hidden_layers + [self.dimensions],
reparameterized=True)
self.nn_var_list = self.nn.var_list
def load_data(self, *args, **kwargs):
super(AEGRModel, self).load_data(*args, **kwargs)
self.initialize_nn()
def joint_log_prob(self, **x):
prior = self.joint_log_prior(**x)
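        # Category thresholds are rebuilt as a cumulative sum of the first
        # difficulty and the difficulty increments, which (assuming positive
        # increments) keeps the thresholds ordered within each item.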
d0 = tf.concat(
[x['difficulties0'], x['ddifficulties']],
axis=-1)
difficulties = tf.cumsum(
d0, axis=-1)
likelihood = tf.reduce_sum(
self.log_likelihood(
self.calibration_data,
x['discriminations'],
difficulties,
x['abilities']
),
axis=[-1, -2]
)
return prior + likelihood
def joint_log_prior(
self, **x):
weight_tensors = {v: x[v] for v in self.nn.weight_var_list}
abilities = self.nn.assemble_networks(
weight_tensors)(self.calibration_data)
grm_vars = {k: x[k] for k in self.grm_vars}
grm_vars["abilities"] = abilities[..., tf.newaxis, tf.newaxis]
grm_vars["responses"] = self.calibration_data
nn_log_prior = self.nn.log_prob(weight_tensors)
grm_log_prior = (
super(
AEGRModel, self
).joint_log_prob_auxiliary(**grm_vars) if self.auxiliary_parameterization
else
super(
AEGRModel, self
).joint_log_prob(**grm_vars)
)
return nn_log_prior + grm_log_prior
def sample(self, *args, **kwargs):
nn_sample = self.nn.sample(*args, **kwargs)
grm_sample = self.surrogate_posterior.sample(*args, **kwargs)
return {**nn_sample, **grm_sample}
def create_distributions(self, *args, **kwargs):
super(
AEGRModel, self
).create_distributions(
*args, **kwargs
)
self.surrogate_distribution_hybrid = (
tfd.JointDistributionNamed({
**self.surrogate_distribution_dict,
**self.nn.surrogate_distribution_dict
})
)
def calibrate_advi(
self, num_steps=10, initial_learning_rate=5e-3,
decay_rate=0.99, learning_rate=None,
opt=None, clip=None):
if learning_rate is None:
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=initial_learning_rate,
decay_steps=num_steps,
decay_rate=decay_rate,
staircase=True)
if opt is None:
opt = tf.optimizers.Adam(
learning_rate=learning_rate)
@tf.function
def run_approximation(num_steps):
losses = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=(
self.joint_log_prob if clip is None
else clip_gradients(
self.joint_log_prob, clip)),
surrogate_posterior=self.surrogate_distribution_hybrid,
optimizer=opt,
num_steps=num_steps,
sample_size=25
)
return(losses)
losses = run_approximation(num_steps)
print(losses)
if (not np.isnan(losses[-1])) and (not np.isinf(losses[-1])):
self.set_calibration_expectations()
return(losses)
def main():
from autoencirt.data.rwa import get_data
aegrm = AEGRModel(hidden_layers=[20, 30])
aegrm.load_data(get_data())
aegrm.create_distributions()
sample = aegrm.sample([2, 3])
prob = aegrm.joint_log_prob(**sample)
print(prob)
aegrm.calibrate_advi(10, clip=1.)
return
if __name__ == "__main__":
main()
|
the-stack_0_4749 | """
Support for local control of entities by emulating the Phillips Hue bridge.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/emulated_hue/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant import util
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.components.http import REQUIREMENTS # NOQA
from homeassistant.components.http import HomeAssistantWSGI
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.deprecation import get_deprecated
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from .hue_api import (
HueUsernameView, HueAllLightsStateView, HueOneLightStateView,
HueOneLightChangeView)
from .upnp import DescriptionXmlView, UPNPResponderThread
DOMAIN = 'emulated_hue'
_LOGGER = logging.getLogger(__name__)
NUMBERS_FILE = 'emulated_hue_ids.json'
CONF_HOST_IP = 'host_ip'
CONF_LISTEN_PORT = 'listen_port'
CONF_ADVERTISE_IP = 'advertise_ip'
CONF_ADVERTISE_PORT = 'advertise_port'
CONF_UPNP_BIND_MULTICAST = 'upnp_bind_multicast'
CONF_OFF_MAPS_TO_ON_DOMAINS = 'off_maps_to_on_domains'
CONF_EXPOSE_BY_DEFAULT = 'expose_by_default'
CONF_EXPOSED_DOMAINS = 'exposed_domains'
CONF_TYPE = 'type'
TYPE_ALEXA = 'alexa'
TYPE_GOOGLE = 'google_home'
DEFAULT_LISTEN_PORT = 8300
DEFAULT_UPNP_BIND_MULTICAST = True
DEFAULT_OFF_MAPS_TO_ON_DOMAINS = ['script', 'scene']
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
'switch', 'light', 'group', 'input_boolean', 'media_player', 'fan'
]
DEFAULT_TYPE = TYPE_GOOGLE
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST_IP): cv.string,
vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
vol.Optional(CONF_ADVERTISE_IP): cv.string,
vol.Optional(CONF_ADVERTISE_PORT): cv.port,
vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean,
vol.Optional(CONF_OFF_MAPS_TO_ON_DOMAINS): cv.ensure_list,
vol.Optional(CONF_EXPOSE_BY_DEFAULT): cv.boolean,
vol.Optional(CONF_EXPOSED_DOMAINS): cv.ensure_list,
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE):
vol.Any(TYPE_ALEXA, TYPE_GOOGLE)
})
}, extra=vol.ALLOW_EXTRA)
ATTR_EMULATED_HUE = 'emulated_hue'
ATTR_EMULATED_HUE_HIDDEN = 'emulated_hue_hidden'
def setup(hass, yaml_config):
"""Activate the emulated_hue component."""
config = Config(hass, yaml_config.get(DOMAIN, {}))
server = HomeAssistantWSGI(
hass,
server_host=config.host_ip_addr,
server_port=config.listen_port,
api_password=None,
ssl_certificate=None,
ssl_key=None,
cors_origins=None,
use_x_forwarded_for=False,
trusted_networks=[],
login_threshold=0,
is_ban_enabled=False
)
server.register_view(DescriptionXmlView(config))
server.register_view(HueUsernameView)
server.register_view(HueAllLightsStateView(config))
server.register_view(HueOneLightStateView(config))
server.register_view(HueOneLightChangeView(config))
upnp_listener = UPNPResponderThread(
config.host_ip_addr, config.listen_port,
config.upnp_bind_multicast, config.advertise_ip,
config.advertise_port)
@asyncio.coroutine
def stop_emulated_hue_bridge(event):
"""Stop the emulated hue bridge."""
upnp_listener.stop()
yield from server.stop()
@asyncio.coroutine
def start_emulated_hue_bridge(event):
"""Start the emulated hue bridge."""
upnp_listener.start()
yield from server.start()
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, stop_emulated_hue_bridge)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_emulated_hue_bridge)
return True
class Config(object):
"""Hold configuration variables for the emulated hue bridge."""
def __init__(self, hass, conf):
"""Initialize the instance."""
self.hass = hass
self.type = conf.get(CONF_TYPE)
self.numbers = None
self.cached_states = {}
if self.type == TYPE_ALEXA:
_LOGGER.warning("Alexa type is deprecated and will be removed in a"
" future version")
# Get the IP address that will be passed to the Echo during discovery
self.host_ip_addr = conf.get(CONF_HOST_IP)
if self.host_ip_addr is None:
self.host_ip_addr = util.get_local_ip()
_LOGGER.info(
"Listen IP address not specified, auto-detected address is %s",
self.host_ip_addr)
# Get the port that the Hue bridge will listen on
self.listen_port = conf.get(CONF_LISTEN_PORT)
if not isinstance(self.listen_port, int):
self.listen_port = DEFAULT_LISTEN_PORT
_LOGGER.info(
"Listen port not specified, defaulting to %s",
self.listen_port)
if self.type == TYPE_GOOGLE and self.listen_port != 80:
_LOGGER.warning("When targeting Google Home, listening port has "
"to be port 80")
# Get whether or not UPNP binds to multicast address (239.255.255.250)
# or to the unicast address (host_ip_addr)
self.upnp_bind_multicast = conf.get(
CONF_UPNP_BIND_MULTICAST, DEFAULT_UPNP_BIND_MULTICAST)
# Get domains that cause both "on" and "off" commands to map to "on"
# This is primarily useful for things like scenes or scripts, which
# don't really have a concept of being off
self.off_maps_to_on_domains = conf.get(CONF_OFF_MAPS_TO_ON_DOMAINS)
if not isinstance(self.off_maps_to_on_domains, list):
self.off_maps_to_on_domains = DEFAULT_OFF_MAPS_TO_ON_DOMAINS
# Get whether or not entities should be exposed by default, or if only
# explicitly marked ones will be exposed
self.expose_by_default = conf.get(
CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT)
# Get domains that are exposed by default when expose_by_default is
# True
self.exposed_domains = conf.get(
CONF_EXPOSED_DOMAINS, DEFAULT_EXPOSED_DOMAINS)
# Calculated effective advertised IP and port for network isolation
self.advertise_ip = conf.get(
CONF_ADVERTISE_IP) or self.host_ip_addr
self.advertise_port = conf.get(
CONF_ADVERTISE_PORT) or self.listen_port
def entity_id_to_number(self, entity_id):
"""Get a unique number for the entity id."""
if self.type == TYPE_ALEXA:
return entity_id
if self.numbers is None:
self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
# Google Home
for number, ent_id in self.numbers.items():
if entity_id == ent_id:
return number
number = '1'
if self.numbers:
number = str(max(int(k) for k in self.numbers) + 1)
self.numbers[number] = entity_id
save_json(self.hass.config.path(NUMBERS_FILE), self.numbers)
return number
def number_to_entity_id(self, number):
"""Convert unique number to entity id."""
if self.type == TYPE_ALEXA:
return number
if self.numbers is None:
self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
# Google Home
assert isinstance(number, str)
return self.numbers.get(number)
def is_entity_exposed(self, entity):
"""Determine if an entity should be exposed on the emulated bridge.
Async friendly.
"""
if entity.attributes.get('view') is not None:
# Ignore entities that are views
return False
domain = entity.domain.lower()
explicit_expose = entity.attributes.get(ATTR_EMULATED_HUE, None)
explicit_hidden = entity.attributes.get(ATTR_EMULATED_HUE_HIDDEN, None)
if explicit_expose is True or explicit_hidden is False:
expose = True
elif explicit_expose is False or explicit_hidden is True:
expose = False
else:
expose = None
get_deprecated(entity.attributes, ATTR_EMULATED_HUE_HIDDEN,
ATTR_EMULATED_HUE, None)
domain_exposed_by_default = \
self.expose_by_default and domain in self.exposed_domains
# Expose an entity if the entity's domain is exposed by default and
# the configuration doesn't explicitly exclude it from being
# exposed, or if the entity is explicitly exposed
is_default_exposed = \
domain_exposed_by_default and expose is not False
return is_default_exposed or expose
def _load_json(filename):
"""Wrapper, because we actually want to handle invalid json."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
|
the-stack_0_4750 | """mysite2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('test_get',views.test_get),
path('test_post',views.test_post),
path('birthday',views.birthday),
path('test_html',views.test_html),
path('mycalc',views.test_calc),
path("",views.test_html)
]
|
the-stack_0_4752 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
###############################################################################
# Copyright (c), The AiiDA-CP2K authors. #
# SPDX-License-Identifier: MIT #
# AiiDA-CP2K is hosted on GitHub at https://github.com/aiidateam/aiida-cp2k #
# For further information on the license, see the LICENSE.txt file. #
###############################################################################
"""Run simple DFT calculation"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import ase.build
import click
from aiida.engine import run
from aiida.orm import (Code, Dict, SinglefileData, StructureData)
from aiida.common import NotExistent
from aiida.plugins import CalculationFactory
Cp2kCalculation = CalculationFactory('cp2k')
def example_structure_through_file(cp2k_code):
"""Run simple DFT calculation"""
print("Testing CP2K ENERGY on H2O (DFT). Water molecule is provided through a file input...")
pwd = os.path.dirname(os.path.realpath(__file__))
# structure
atoms = ase.build.molecule('H2O')
atoms.center(vacuum=2.0)
structure = StructureData(ase=atoms)
# basis set
basis_file = SinglefileData(file=os.path.join(pwd, "..", "files", "BASIS_MOLOPT"))
# pseudopotentials
pseudo_file = SinglefileData(file=os.path.join(pwd, "..", "files", "GTH_POTENTIALS"))
# parameters
parameters = Dict(
dict={
'FORCE_EVAL': {
'METHOD': 'Quickstep',
'DFT': {
'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
'POTENTIAL_FILE_NAME': 'GTH_POTENTIALS',
'QS': {
'EPS_DEFAULT': 1.0e-12,
'WF_INTERPOLATION': 'ps',
'EXTRAPOLATION_ORDER': 3,
},
'MGRID': {
'NGRIDS': 4,
'CUTOFF': 280,
'REL_CUTOFF': 30,
},
'XC': {
'XC_FUNCTIONAL': {
'_': 'LDA',
},
},
'POISSON': {
'PERIODIC': 'none',
'PSOLVER': 'MT',
},
},
'SUBSYS': {
'TOPOLOGY': {
'COORD_FILE_NAME': 'water.xyz',
'COORD_FILE_FORMAT': 'XYZ'
},
'CELL': {
'ABC': '{:<15} {:<15} {:<15}'.format(*atoms.cell.diagonal()),
},
'KIND': [
{
'_': 'O',
'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
'POTENTIAL': 'GTH-LDA-q6'
},
{
'_': 'H',
'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
'POTENTIAL': 'GTH-LDA-q1'
},
],
},
}
})
# Construct process builder
builder = Cp2kCalculation.get_builder()
builder.parameters = parameters
builder.code = cp2k_code
builder.file = {
'basis': basis_file,
'pseudo': pseudo_file,
'water': structure,
}
builder.metadata.options.resources = {
"num_machines": 1,
"num_mpiprocs_per_machine": 1,
}
builder.metadata.options.max_wallclock_seconds = 1 * 3 * 60
print("Submitted calculation...")
run(builder)
@click.command('cli')
@click.argument('codelabel')
def cli(codelabel):
"""Click interface"""
try:
code = Code.get_from_string(codelabel)
except NotExistent:
print("The code '{}' does not exist".format(codelabel))
sys.exit(1)
example_structure_through_file(code)
if __name__ == '__main__':
cli() # pylint: disable=no-value-for-parameter
|
the-stack_0_4756 | from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from src.api import Movies, Categories, Upload
from src.models import Database
from os import getcwd
def create_app():
static_folder = getcwd() + '/img/'
app = Flask(
__name__,
static_folder=static_folder
)
CORS(app)
api = Api(app)
Database().migrate()
api.add_resource(Upload,'/upload')
api.add_resource(Movies,'/movies','/movies/<string:id>')
api.add_resource(Categories,'/categories','/categories/<string:id>')
return app
if __name__ == '__main__':
app = create_app()
host = '0.0.0.0'
port = 8000
debug = True
app.run(host,port,debug)
|
the-stack_0_4757 | import discord
import logging
import pprint
import socket
from aiohttp import web
from json import JSONDecodeError
from logging.config import fileConfig
from typing import List, Union
from utils.match import Match
class WebServer:
def __init__(self, bot):
from bot import ICL_bot
fileConfig('logging.conf')
self.logger = logging.getLogger(f'ICL_bot.{__name__}')
self.bot: ICL_bot = bot
self.IP: str = socket.gethostbyname(socket.gethostname())
self.port: int = self.bot.bot_port
self.site: web.TCPSite = None
async def _handler(self, request: web.Request) -> Union[web.Response, web.FileResponse]:
"""
Super simple HTTP handler.
Parameters
----------
request : web.Request
AIOHTTP request object.
"""
if request.method == 'GET':
self.logger.debug(f'{request.remote} accessed {self.IP}:{self.port}{request.path}')
return WebServer._http_error_handler()
# Auth check for json
elif request.method == 'POST':
try:
faceit = await request.json()
except JSONDecodeError:
self.logger.warning(f'{request.remote} sent a invalid json POST ')
return WebServer._http_error_handler('json-body')
self.logger.debug(f'webhook = \n {pprint.pformat(faceit)}')
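            # Only the first delivery of each event is processed
            # (retry_count == 0); this looks intended to skip FACEIT's
            # automatic webhook retries.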
if faceit['retry_count'] == 0:
if faceit['event'] == 'match_status_ready':
self.logger.debug(f'{faceit["payload"]["id"]} is ready')
match_exists = False
for match_check in self.bot.matches:
self.logger.debug(f'{match_check.match_id}')
if match_check.match_id == str(faceit['payload']['id']):
match_exists = True
self.logger.error('Match already exists')
break
if not match_exists:
self.logger.info('Creating channels')
team1_channel: discord.VoiceChannel = await self.bot.get_channel(
787774505854042132).create_voice_channel(
name=faceit["payload"]["teams"][0]["name"], user_limit=6)
team2_channel: discord.VoiceChannel = await self.bot.get_channel(
787774505854042132).create_voice_channel(
name=faceit["payload"]["teams"][1]["name"], user_limit=6)
team1_roster = []
for team1_player in faceit["payload"]["teams"][0]["roster"]:
team1_roster.append((team1_player['id'], team1_player['nickname']))
team2_roster = []
for team2_player in faceit["payload"]["teams"][1]["roster"]:
team2_roster.append((team2_player['id'], team2_player['nickname']))
team1_invite = await team1_channel.create_invite(max_age=7200)
team2_invite = await team2_channel.create_invite(max_age=7200)
new_match = Match(faceit['payload']['id'], team1_channel, team2_channel, team1_invite, team2_invite,
faceit["payload"]["teams"][0]["name"], faceit["payload"]["teams"][1]["name"],
team1_roster, team2_roster)
self.bot.matches.append(new_match)
self.logger.debug(len(self.bot.matches))
self.logger.debug('finishing creating the match')
if not self.bot.cogs['CSGO'].update_scorecard.is_running():
self.logger.debug('starting loop thingy')
self.bot.cogs['CSGO'].update_scorecard.start()
if faceit['event'] == 'match_status_finished' or faceit['event'] == 'match_status_aborted' or faceit['event'] == 'match_status_cancelled':
self.logger.debug(f'{faceit["payload"]["id"]} is over')
match: Match = None
for match_check in self.bot.matches:
self.logger.debug(f'{match_check.match_id}')
if match_check.match_id == str(faceit['payload']['id']):
match = match_check
self.logger.debug(f'Found match {match.match_id}')
break
if match is not None:
for member in match.team1_channel.members + match.team2_channel.members:
try:
await member.move_to(channel=self.bot.get_channel(784164015122546751), reason=f'Match Complete')
except (discord.HTTPException, discord.Forbidden):
self.logger.error(f'Could not move {member}')
await match.team1_channel.delete(reason=f'{faceit["payload"]["id"]} Complete')
await match.team2_channel.delete(reason=f'{faceit["payload"]["id"]} Complete')
self.bot.matches.remove(match)
self.logger.debug('Sending 200')
return web.Response(status=200)
else:
# Used to decline any requests what doesn't match what our
# API expects.
self.logger.warning(f'{request.remote} sent an invalid request.')
return WebServer._http_error_handler("request-type")
async def http_start(self) -> None:
"""
Used to start the webserver inside the same context as the bot.
"""
server = web.Server(self._handler)
runner = web.ServerRunner(server)
await runner.setup()
self.site = web.TCPSite(runner, self.IP, self.port)
await self.site.start()
self.logger.info(f'Webserver Started on {self.IP}:{self.port}')
async def http_stop(self) -> None:
"""
Used to stop the webserver inside the same context as the bot.
"""
self.logger.warning(f'Webserver Stopping on {self.IP}:{self.port}')
await self.site.stop()
@staticmethod
def _http_error_handler(error: str = 'Undefined Error') -> web.Response:
"""
Used to handle HTTP error response.
Parameters
----------
error : bool, optional
Bool or string to be used, by default False
Returns
-------
web.Response
AIOHTTP web server response.
"""
return web.json_response(
{"error": error},
status=400 if error else 200
)
|
the-stack_0_4758 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Weight loader."""
import numpy as np
from mindspore.train.serialization import load_checkpoint
def load_infer_weights(config):
"""
Load weights from ckpt or npz.
Args:
config (TransformerConfig): Config.
Returns:
dict, weights.
"""
model_path = config.checkpoint_file_path
if model_path.endswith(".npz"):
ms_ckpt = np.load(model_path)
is_npz = True
else:
ms_ckpt = load_checkpoint(model_path)
is_npz = False
weights = {}
with open("variable_after_deal.txt", "a") as f:
for param_name in ms_ckpt:
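            # Renaming applied below, e.g. (illustrative parameter names):
            #   transformer.transformer.encoder.layers.0.attention -> encoder.layers.0.attention
            #   transformer.transformer.decoder.layers.0.attention -> decoder.decoder.decoder.layer0.attention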
infer_name = param_name.replace("transformer.transformer.", "")
if not infer_name.startswith("encoder"):
if infer_name.startswith("decoder.layers."):
infer_name = infer_name.replace("decoder.layers.", "decoder.layer")
infer_name = "decoder.decoder." + infer_name
if is_npz:
weights[infer_name] = ms_ckpt[param_name]
else:
weights[infer_name] = ms_ckpt[param_name].data.asnumpy()
f.write(infer_name)
f.write("\n")
f.close()
return weights
|
the-stack_0_4760 | from collections import Counter
from itertools import combinations
import json
import warnings
import networkx as nx
class CrossCorrelationGraph:
"""CrossCorrelationGraph for computing correlation between clusters
Attributes
----------
window : float
Threshold for the window size in seconds
correlation : float
Threshold for the minimum required correlation
graph : nx.Graph
Cross correlation graph containing all correlations
Note that each node in the graph represents an 'activity signature'
to avoid duplicates. The NetworkDestinations corresponding to each
signature are stored in the 'mapping' attribute.
Note
----
IMPORTANT: The CrossCorrelation.graph object is an optimised graph.
Each node does not represent a network destination, but represents
an activity fingerprint. E.g. when destinations A and B are both
only active at time slices 3 and 7, then these destinations are
represented by a single node. We use the self.mapping to extract the
network destinations from each graph node.
This is a huge optimisation for finding cliques as the number of
different network destinations theoretically covers the entire IP
space, whereas the number of activity fingerprints is bounded by
2^(batch / window), in our work 2^(300/30) = 2^10 = 1024. If these
parameters change, the complexity may increase, but never beyond the
original bounds. Hence, this optimisation never has a worse time
complexity.
mapping : dict
NetworkDestinations corresponding to each node in the graph
"""
def __init__(self, window=30, correlation=0.1):
"""CrossCorrelationGraph for computing correlation between clusters
Parameters
----------
window : float, default=30
Threshold for the window size in seconds
correlation : float, default=0.1
Threshold for the minimum required correlation
"""
# Set parameters
self.window = window
self.correlation = correlation
self.mapping = dict()
self.graph = nx.Graph()
def fit(self, cluster, y=None):
"""Fit Cross Correlation Graph.
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
y : ignored
Returns
-------
result : self
Returns self
"""
# Compute cross correlations within cluster
correlations, self.mapping = self.cross_correlation(cluster)
if self.correlation <= 0: # Create a fully connected graph
self.graph = nx.complete_graph(list(self.mapping.keys()))
else:
self.graph = nx.Graph()
self.graph.add_nodes_from(list(self.mapping.keys()))
for (u, v), weight in correlations.items():
if weight >= self.correlation:
self.graph.add_edge(u, v, weight=weight)
return self
def predict(self, X=None, y=None):
"""Fit Cross Correlation Graph and return cliques.
Parameters
----------
X : ignored
y : ignored
Returns
-------
result : Generator of cliques
Generator of all cliques in the graph
"""
cliques = nx.find_cliques(self.graph)
return (set.union(*[self.mapping.get(n) for n in c]) for c in cliques)
def fit_predict(self, cluster, y=None):
"""Fit cross correlation graph with clusters from X and return cliques.
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
y : ignored
Returns
-------
result : Generator of cliques
Generator of all cliques in the graph
"""
return self.fit(cluster).predict(cluster)
def export(self, outfile, dense=True, format="gexf"):
"""Export CrossCorrelationGraph to outfile for further analysis
Parameters
----------
outfile : string
File to export CrossCorrelationGraph
dense : boolean, default=True
If True export the dense graph (see IMPORTANT note at graph),
this means that each node is represented by the time slices in
which they were active. Each node still has the information of
all correlated nodes.
If False export the complete graph. Note that these graphs can
get very large with lots of edges, therefore, for manual
inspection it is recommended to use dense=True instead.
format : ('gexf'|'gml'), default='gexf'
Format in which to export, currently only 'gexf', 'gml' are
supported.
"""
if dense:
graph = self.graph
# Initialise human-readable mapping of nodes
mapping = dict()
# Fill mapping
for node in graph:
info = {
"window": list(sorted(node)),
"ips": set(),
"certs": set(),
"labels": Counter(),
}
# Loop over corresponding network destinations
for destination in self.mapping.get(node):
info["ips"] = info.get("ips", set()) | destination.destinations
info["certs"] = info.get("certs", set()) | destination.certificates
info["labels"] = info.get("labels", Counter()) + destination.labels
# Remove None from certificates
info["certs"] = info.get("certs", set()) - {None}
# Transform sets into lists
info["ips"] = list(info.get("ips", set()))
info["certs"] = list(info.get("certs", set()))
# Store mapping as text
mapping[node] = json.dumps(info, sort_keys=True)
graph = nx.relabel_nodes(graph, mapping)
# Make graph not dense
else:
graph = nx.Graph()
for node in self.graph:
for destination in self.mapping.get(node):
graph.add_node(destination)
for node in self.graph:
for source in self.mapping.get(node):
for destination in self.mapping.get(node):
if source == destination:
continue
graph.add_edge(source, destination, weight=1)
# Add all edges to other nodes
for connected in nx.neighbors(self.graph, node):
# Get edge get_edge_data
data = self.graph.get_edge_data(node, connected)
# Get all destinations
for destination in self.mapping.get(connected):
graph.add_edge(source, destination, data=data)
# Transform network destinations to human readable format
mapping = dict()
for node in self.graph:
for destination in self.mapping.get(node):
info = {
"window": list(sorted(node)),
"ips": list(destination.destinations),
"certs": list(destination.certificates - {None}),
"labels": destination.labels,
}
mapping[destination] = json.dumps(info, sort_keys=True)
graph = nx.relabel_nodes(graph, mapping)
# Export graph to file
if format.lower() == "gexf":
nx.write_gexf(graph, outfile)
elif format.lower() == "gml":
nx.write_gml(graph, outfile)
else:
# Warn user of unknown format
warnings.warn(f"Unknown export format '{format}', defaulting to 'gexf'")
# Export as gexf
nx.write_gexf(graph, outfile)
def cross_correlation(self, cluster):
"""Compute cross correlation between clusters
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
Returns
-------
correlation : dict
Dictionary of cross correlation values between each
NetworkDestination inside cluster.
mapping : dict
Mapping of activity fingerprint -> clusters
"""
correlation = dict()
# Get activity of samples
activity = self.activity(cluster)
# Get inverted mapping
mapping = dict()
for destination, active in activity.items():
mapping[frozenset(active)] = mapping.get(frozenset(active), set()) | set(
[destination]
)
# Compute cross correlation values
for x, y in combinations(mapping, 2):
union = len(x & y)
if union:
intersection = len(x | y)
correlation[x, y] = union / intersection
return correlation, mapping
def activity(self, cluster):
"""Extracts sets of active clusters by time.
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
Returns
-------
mapping : dict
Dictionary of NetworkDestination -> activity
"""
X = cluster.samples
start = min(x.time_start for x in X)
# Initialise mapping of NetworkDestination -> activity
mapping = dict()
for destination in cluster.clusters():
for flow in destination.samples:
activity = set()
for timestamp in flow.timestamps:
activity.add(int((timestamp - start) // self.window))
mapping[destination] = mapping.get(destination, set()) | activity
return mapping
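# ---------------------------------------------------------------------------
# Minimal illustrative sketch (not part of the original API): the correlation
# computed in `cross_correlation` is a Jaccard-style overlap between activity
# fingerprints, i.e. the sets of window indices in which two destinations were
# active. The toy frozensets below are hypothetical fingerprints.
if __name__ == "__main__":
    a = frozenset({3, 7, 9})
    b = frozenset({3, 7})
    shared = len(a & b)  # windows where both were active
    total = len(a | b)   # windows where at least one was active
    print(f"correlation(a, b) = {shared / total:.2f}")  # prints 0.67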
|
the-stack_0_4761 | from PyQt5 import QtCore, QtWidgets, QtGui
import numpy as np
import brainbox as bb
class FilterGroup:
def __init__(self):
self.reset_filter_button = QtWidgets.QPushButton('Reset Filters')
self.contrast_options_text = QtWidgets.QLabel('Stimulus Contrast')
self.contrasts = [1, 0.25, 0.125, 0.0625, 0]
self.contrast_options = QtWidgets.QListWidget()
for val in self.contrasts:
item = QtWidgets.QListWidgetItem(str(val * 100) + ' %')
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Checked)
self.contrast_options.addItem(item)
self.hold_button = QtWidgets.QCheckBox('Hold')
self.hold_button.setCheckState(QtCore.Qt.Checked)
self.filter_buttons = QtWidgets.QButtonGroup()
self.filter_group = QtWidgets.QGroupBox('Filter Options')
self.filter_layout = QtWidgets.QVBoxLayout()
self.filter_layout.setSpacing(5)
#self.filter_buttons.setExclusive(False)
filter_options = ['all', 'correct', 'incorrect', 'left', 'right', 'left correct', 'left incorrect', 'right correct', 'right incorrect']
for i, val in enumerate(filter_options):
button = QtWidgets.QCheckBox(val)
if val == 'all':
button.setCheckState(QtCore.Qt.Checked)
else:
button.setCheckState(QtCore.Qt.Unchecked)
self.filter_buttons.addButton(button, id=i)
self.filter_layout.addWidget(button)
self.filter_group.setLayout(self.filter_layout)
self.trial_buttons = QtWidgets.QButtonGroup()
self.trial_group = QtWidgets.QGroupBox('Sort Trials By:')
self.trial_layout = QtWidgets.QHBoxLayout()
trial_options = ['trial no.', 'correct vs incorrect', 'left vs right', 'correct vs incorrect and left vs right']
for i, val in enumerate(trial_options):
button = QtWidgets.QRadioButton(val)
if i == 0:
button.setChecked(True)
else:
button.setChecked(False)
self.trial_buttons.addButton(button, id = i)
self.trial_layout.addWidget(button)
self.trial_group.setLayout(self.trial_layout)
# Print out no. of trials for each filter condition
self.ntrials_text = QtWidgets.QLabel('No. of trials = ')
self.filter_options_group = QtWidgets.QGroupBox()
self.group_filter_widget()
self.filter_options_group.setFixedSize(250, 380)
def group_filter_widget(self):
group_layout = QtWidgets.QVBoxLayout()
group_layout.addWidget(self.reset_filter_button)
group_layout.addWidget(self.contrast_options_text)
group_layout.addWidget(self.contrast_options)
group_layout.addWidget(self.hold_button)
group_layout.addWidget(self.filter_group)
group_layout.addWidget(self.ntrials_text)
self.filter_options_group.setLayout(group_layout)
def get_checked_contrasts(self):
'''
Finds the contrast options that are selected. Called by on_contrast_list_changed in gui_main.
Returns
----------
stim_contrast: list
A list of the contrast options that are selected
'''
stim_contrast = []
for idx in range(self.contrast_options.count()):
if self.contrast_options.item(idx).checkState() == QtCore.Qt.Checked:
stim_contrast.append(self.contrasts[idx])
return stim_contrast
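    # Illustrative example (assumed GUI state): if only the first two contrast
    # items (100% and 25%) are left checked, get_checked_contrasts() returns
    # [1, 0.25].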
def compute_and_sort_trials(self, stim_contrast):
#Precompute trials for a given contrast set
#All
all_trials = bb.core.Bunch()
all_trials['colour'] = QtGui.QColor('#808080')
all_trials['fill'] = QtGui.QColor('#808080')
all_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastLeft'] == c) | (self.trials['contrastRight'] == c))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
all_trials['trial no.'] = trials_no
trials_ic = bb.core.Bunch()
correct = np.intersect1d(trials_id, self.correct_idx)
incorrect = np.intersect1d(trials_id, self.incorrect_idx)
trials_ic['trials'] = np.append(correct, incorrect)
trials_ic['lines'] = [[0, len(correct)], [len(correct), len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#1f77b4'), QtGui.QColor('#d62728')]
trials_ic['text'] = ['correct', 'incorrect']
all_trials['correct vs incorrect'] = trials_ic
trials_lf = bb.core.Bunch()
left = np.intersect1d(trials_id, self.left_idx)
right = np.intersect1d(trials_id, self.right_idx)
trials_lf['trials'] = np.append(left, right)
trials_lf['lines'] = [[0, len(left)], [len(left), len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#2ca02c'), QtGui.QColor('#bcbd22')]
trials_lf['text'] = ['left', 'right']
all_trials['left vs right'] = trials_lf
trials_iclf = bb.core.Bunch()
correct_right = np.intersect1d(trials_id, self.correct_right_idx)
correct_left = np.intersect1d(trials_id, self.correct_left_idx)
incorrect_right = np.intersect1d(trials_id, self.incorrect_right_idx)
incorrect_left = np.intersect1d(trials_id, self.incorrect_left_idx)
trials_iclf['trials'] = np.concatenate((correct_left, correct_right, incorrect_left, incorrect_right))
trials_iclf['lines'] = [[0, len(correct_left)], [len(correct_left), len(correct_left)
+ len(correct_right)], [len(correct_left) + len(correct_right), len(correct_left)
+ len(correct_right) + len(incorrect_left)],[len(correct_left) + len(correct_right)
+ len(incorrect_left), len(trials_iclf['trials'])]]
trials_iclf['linecolours'] = [QtGui.QColor('#17becf'), QtGui.QColor('#9467bd'), QtGui.QColor('#8c564b'), QtGui.QColor('#ff7f0e')]
trials_iclf['text'] = ['left correct', 'right correct', 'left incorrect', 'right incorrect']
all_trials['correct vs incorrect and left vs right'] = trials_iclf
#Correct
correct_trials = bb.core.Bunch()
correct_trials['colour'] = QtGui.QColor('#1f77b4')
correct_trials['fill'] = QtGui.QColor('#1f77b4')
correct_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(((self.trials['contrastLeft'] == c) | (self.trials['contrastRight'] == c)) & (self.trials['feedbackType'] == 1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
correct_trials['trial no.'] = trials_no
trials_ic = bb.core.Bunch()
trials_ic['trials'] = trials_id
trials_ic['lines'] = [[0, len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#1f77b4')]
trials_ic['text'] = ['correct']
correct_trials['correct vs incorrect'] = trials_ic
trials_lf = bb.core.Bunch()
left = np.intersect1d(trials_id, self.correct_left_idx)
right = np.intersect1d(trials_id, self.correct_right_idx)
trials_lf['trials'] = np.append(left, right)
trials_lf['lines'] = [[0, len(left)], [len(left), len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#17becf'), QtGui.QColor('#9467bd')]
trials_lf['text'] = ['left correct', 'right correct']
correct_trials['left vs right'] = trials_lf
correct_trials['correct vs incorrect and left vs right'] = trials_lf
#Incorrect
incorrect_trials = bb.core.Bunch()
incorrect_trials['colour'] = QtGui.QColor('#d62728')
incorrect_trials['fill'] = QtGui.QColor('#d62728')
incorrect_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(((self.trials['contrastLeft'] == c) | (self.trials['contrastRight'] == c)) & (self.trials['feedbackType'] == -1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
incorrect_trials['trial no.'] = trials_no
trials_ic = bb.core.Bunch()
trials_ic['trials'] = trials_id
trials_ic['lines'] = [[0, len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#d62728')]
trials_ic['text'] = ['incorrect']
incorrect_trials['correct vs incorrect'] = trials_ic
trials_lf = bb.core.Bunch()
trials_iclf = bb.core.Bunch()
left = np.intersect1d(trials_id, self.incorrect_left_idx)
right = np.intersect1d(trials_id, self.incorrect_right_idx)
trials_lf['trials'] = np.append(left, right)
trials_lf['lines'] = [[0, len(left)], [len(left), len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#8c564b'), QtGui.QColor('#ff7f0e')]
trials_lf['text'] = ['left incorrect', 'right incorrect']
incorrect_trials['left vs right'] = trials_lf
incorrect_trials['correct vs incorrect and left vs right'] = trials_lf
#Left
left_trials = bb.core.Bunch()
left_trials['colour'] = QtGui.QColor('#2ca02c')
left_trials['fill'] = QtGui.QColor('#2ca02c')
left_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(self.trials['contrastLeft'] == c)[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
left_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#2ca02c')]
trials_lf['text'] = ['left']
left_trials['left vs right'] = trials_lf
trials_ic = bb.core.Bunch()
correct = np.intersect1d(trials_id, self.correct_left_idx)
incorrect = np.intersect1d(trials_id, self.incorrect_left_idx)
trials_ic['trials'] = np.append(correct, incorrect)
trials_ic['lines'] = [[0, len(correct)], [len(correct), len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#17becf'), QtGui.QColor('#8c564b')]
trials_ic['text'] = ['left correct', 'left incorrect']
left_trials['correct vs incorrect'] = trials_ic
left_trials['correct vs incorrect and left vs right'] = trials_ic
#Right
right_trials = bb.core.Bunch()
right_trials['colour'] = QtGui.QColor('#bcbd22')
right_trials['fill'] = QtGui.QColor('#bcbd22')
right_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(self.trials['contrastRight'] == c)[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
right_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#bcbd22')]
trials_lf['text'] = ['right']
right_trials['left vs right'] = trials_lf
trials_ic = bb.core.Bunch()
correct = np.intersect1d(trials_id, self.correct_right_idx)
incorrect = np.intersect1d(trials_id, self.incorrect_right_idx)
trials_ic['trials'] = np.append(correct, incorrect)
trials_ic['lines'] = [[0, len(correct)], [len(correct), len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#9467bd'), QtGui.QColor('#ff7f0e')]
trials_ic['text'] = ['right correct', 'right incorrect']
right_trials['correct vs incorrect'] = trials_ic
right_trials['correct vs incorrect and left vs right'] = trials_ic
#Left Correct
left_correct_trials = bb.core.Bunch()
left_correct_trials['colour'] = QtGui.QColor('#17becf')
left_correct_trials['fill'] = QtGui.QColor('#17becf')
left_correct_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastLeft'] == c) & (self.trials['feedbackType'] == 1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
left_correct_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#17becf')]
trials_lf['text'] = ['left correct']
left_correct_trials['left vs right'] = trials_lf
left_correct_trials['correct vs incorrect'] = trials_lf
left_correct_trials['correct vs incorrect and left vs right'] = trials_lf
#Left Incorrect
left_incorrect_trials = bb.core.Bunch()
left_incorrect_trials['colour'] = QtGui.QColor('#8c564b')
left_incorrect_trials['fill'] = QtGui.QColor('#8c564b')
left_incorrect_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastLeft'] == c) & (self.trials['feedbackType'] == -1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
left_incorrect_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#8c564b')]
trials_lf['text'] = ['left incorrect']
left_incorrect_trials['left vs right'] = trials_lf
left_incorrect_trials['correct vs incorrect'] = trials_lf
left_incorrect_trials['correct vs incorrect and left vs right'] = trials_lf
#Right Correct
right_correct_trials = bb.core.Bunch()
right_correct_trials['colour'] = QtGui.QColor('#9467bd')
right_correct_trials['fill'] = QtGui.QColor('#9467bd')
right_correct_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastRight'] == c) & (self.trials['feedbackType'] == 1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
right_correct_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#9467bd')]
trials_lf['text'] = ['right correct']
right_correct_trials['left vs right'] = trials_lf
right_correct_trials['correct vs incorrect'] = trials_lf
right_correct_trials['correct vs incorrect and left vs right'] = trials_lf
#Right Incorrect
right_incorrect_trials = bb.core.Bunch()
right_incorrect_trials['colour'] = QtGui.QColor('#ff7f0e')
right_incorrect_trials['fill'] = QtGui.QColor('#ff7f0e')
right_incorrect_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastRight'] == c) & (self.trials['feedbackType'] == -1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
right_incorrect_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#ff7f0e')]
trials_lf['text'] = ['right incorrect']
right_incorrect_trials['left vs right'] = trials_lf
right_incorrect_trials['correct vs incorrect'] = trials_lf
right_incorrect_trials['correct vs incorrect and left vs right'] = trials_lf
trials = bb.core.Bunch()
trials['all'] = all_trials
trials['correct'] = correct_trials
trials['incorrect'] = incorrect_trials
trials['left'] = left_trials
trials['right'] = right_trials
trials['left correct'] = left_correct_trials
trials['left incorrect'] = left_incorrect_trials
trials['right correct'] = right_correct_trials
trials['right incorrect'] = right_incorrect_trials
return trials
def get_sort_method(self, case):
if case == 'all':
sort_method = 'trial no.'
id = 0
elif (case == 'correct') | (case == 'incorrect'):
sort_method = 'correct vs incorrect'
id = 1
elif (case == 'left') | (case == 'right'):
sort_method = 'left vs right'
id = 2
else:
sort_method = 'correct vs incorrect and left vs right'
id = 3
return sort_method, id
def compute_trial_options(self, trials):
self.trials = trials
nan_feedback = np.where(np.isnan(self.trials['feedback_times']))[0]
nan_goCue = np.where(np.isnan(self.trials['goCue_times']))[0]
self.nan_trials = np.unique(np.append(nan_feedback, nan_goCue))
self.n_trials = len(np.setdiff1d(np.arange(len(self.trials['feedbackType'])), self.nan_trials))
self.correct_idx = np.setdiff1d(np.where(self.trials['feedbackType'] == 1)[0], self.nan_trials)
self.incorrect_idx = np.setdiff1d(np.where(self.trials['feedbackType'] == -1)[0], self.nan_trials)
self.right_idx = np.setdiff1d(np.where(np.isfinite(self.trials['contrastRight']))[0], self.nan_trials)
self.left_idx = np.setdiff1d(np.where(np.isfinite(self.trials['contrastLeft']))[0], self.nan_trials)
self.correct_right_idx = np.setdiff1d(np.intersect1d(self.correct_idx, self.right_idx), self.nan_trials)
self.correct_left_idx = np.setdiff1d(np.intersect1d(self.correct_idx, self.left_idx), self.nan_trials)
self.incorrect_right_idx = np.setdiff1d(np.intersect1d(self.incorrect_idx, self.right_idx), self.nan_trials)
self.incorrect_left_idx = np.setdiff1d(np.intersect1d(self.incorrect_idx, self.left_idx), self.nan_trials)
return self.nan_trials
def reset_filters(self, stim = True):
stim_contrast = [1, 0.25, 0.125, 0.0625, 0]
case = 'all'
sort_method = 'trial no.'
if stim is True:
for idx in range(self.contrast_options.count()):
item = self.contrast_options.item(idx)
item.setCheckState(QtCore.Qt.Checked)
for idx, but in enumerate(self.filter_buttons.buttons()):
if idx == 0:
but.setCheckState(QtCore.Qt.Checked)
else:
but.setCheckState(QtCore.Qt.Unchecked)
for idx, but in enumerate(self.trial_buttons.buttons()):
if idx == 0:
but.setChecked(True)
else:
but.setChecked(False)
return stim_contrast, case, sort_method
|
the-stack_0_4762 | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from collections import namedtuple
import functools
import logging
import re
import attr
from commoncode.datautils import choices
from commoncode.datautils import Boolean
from commoncode.datautils import Date
from commoncode.datautils import Integer
from commoncode.datautils import List
from commoncode.datautils import Mapping
from commoncode.datautils import String
from commoncode.datautils import TriBoolean
from textcode import analysis
"""
Handle Gemfile.lock Rubygems lockfile.
Since there is no specification of the Gemfile.lock format, this
script is based on and contains code derived from Ruby Bundler:
https://raw.githubusercontent.com/bundler/bundler/77e7050364367d98f9bc96911ea2769b69a4735c/lib/bundler/lockfile_parser.rb
TODO: update to latest https://github.com/bundler/bundler/compare/77e7050364367d98f9bc96911ea2769b69a4735c...master#diff-3c625d3cd7d7604b3e2e3c5487a5ede6
Portions copyright (c) 2010 Andre Arko
Portions copyright (c) 2009 Engine Yard
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Some examples:
SVN
remote: file://#{lib_path('foo-1.0')}
revision: 1
ref: HEAD
glob: some globs
specs:
foo (1.0)
GIT
remote: #{lib_path("foo-1.0")}
revision: #{git.ref_for('omg')}
branch: omg
ref: xx
tag: xxx
submodules: xxx
glob:xxx
specs:
foo (1.0)
PATH
remote: relative-path
glob:
specs:
foo (1.0)
"""
TRACE = False
def logger_debug(*args):
pass
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
# Section headings: these are also used as switches to track a parsing state
PATH = u'PATH'
GIT = u'GIT'
SVN = u'SVN'
GEM = u'GEM'
PLATFORMS = u'PLATFORMS'
DEPENDENCIES = u'DEPENDENCIES'
SPECS = u' specs:'
# types of Gems, which is really where they are provisioned from
# RubyGems repo, local path or VCS
GEM_TYPES = (GEM, PATH, GIT, SVN,)
@attr.s()
class GemDependency(object):
name = String()
version = String()
@attr.s()
class Gem(object):
"""
A Gem can be packaged as a .gem archive, or it can be a source gem either
fetched from GIT or SVN or from a local path.
"""
supported_opts = 'remote', 'ref', 'revision', 'branch', 'submodules', 'tag',
name = String()
version = String()
platform = String(
help='Gem platform')
remote = String(
help='remote can be a path, git, svn or Gem repo url. One of GEM, PATH, GIT or SVN')
type = String(
# validator=choices(GEM_TYPES),
help='the type of this Gem: One of: {}'.format(', '.join(GEM_TYPES))
)
pinned = Boolean()
spec_version = String()
# relative path
path = String()
revision = String(
help='A version control full revision (e.g. a Git commit hash).'
)
ref = String(
help='A version control ref (such as a tag, a shortened revision hash, etc.).'
)
branch = String()
submodules = String()
tag = String()
requirements = List(
item_type=String,
help='list of constraints such as ">= 1.1.9"'
)
dependencies = Mapping(
help='a map of direct dependent Gems, keyed by name',
value_type='Gem',
)
def refine(self):
"""
Apply some refinements to the Gem based on its type:
- fix version and revisions for Gems checked-out from VCS
"""
if self.type == PATH:
self.path = self.remote
if self.type in (GIT, SVN,):
# FIXME: this likely WRONG
# TODO: this may not be correct for SVN BUT SVN has been abandoned
self.spec_version = self.version
if self.revision and not self.ref:
self.version = self.revision
elif self.revision and self.ref:
self.version = self.revision
elif not self.revision and self.ref:
self.version = self.ref
def as_nv_tree(self):
"""
Return a tree of name/versions dependency tuples from self as nested
dicts. The tree root is self. Each key is a name/version tuple.
Values are dicts.
"""
tree = {}
root = (self.name, self.version,)
tree[root] = {}
for _name, gem in self.dependencies.items():
tree[root].update(gem.as_nv_tree())
return tree
def flatten(self):
"""
Return a sorted flattened list of unique parent/child tuples.
"""
flattened = []
seen = set()
for gem in self.dependencies.values():
snv = self.type, self.name, self.version
gnv = gem.type, gem.name, gem.version
rel = self, gem
rel_key = snv, gnv
if rel_key not in seen:
flattened.append(rel)
seen.add(rel_key)
for rel in gem.flatten():
parent, child = rel
pnv = parent.type, parent.name, parent.version
cnv = child.type, child.name, child.version
rel_key = pnv, cnv
if rel_key not in seen:
flattened.append(rel)
seen.add(rel_key)
return sorted(flattened)
def dependency_tree(self):
"""
Return a tree of dependencies as nested mappings.
Each key is a "name@version" string and values are dicts.
"""
tree = {}
root = '{}@{}'.format(self.name or '', self.version or '')
tree[root] = {}
for _name, gem in self.dependencies.items():
tree[root].update(gem.dependency_tree())
return tree
def to_dict(self):
"""
Return a native mapping for this Gem.
"""
return dict([
('name', self.name),
('version', self.version),
('platform', self.platform),
('pinned', self.pinned),
('remote', self.remote),
('type', self.type),
('path', self.path),
('revision', self.revision),
('ref', self.ref),
('branch', self.branch),
('submodules', self.submodules),
('tag', self.tag),
('requirements', self.requirements),
('dependencies', self.dependency_tree()),
])
@property
def gem_name(self):
return '{}-{}.gem'.format(self.name, self.version)
OPTIONS = re.compile(r'^ (?P<key>[a-z]+): (?P<value>.*)$').match
def get_option(s):
"""
Parse Gemfile.lock options such as remote, ref, revision, etc.
"""
key = None
value = None
opts = OPTIONS(s)
if opts:
key = opts.group('key') or None
value = opts.group('value') or None
# normalize truth
if value == 'true':
value = True
if value == 'false':
value = False
# only keep known options, discard others
if key not in Gem.supported_opts:
key = None
value = None
return key, value
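# Illustrative (hedged) examples of get_option(): an indented option line such
# as "remote: https://rubygems.org/" from a GEM/GIT/PATH section yields
# ('remote', 'https://rubygems.org/'), while keys outside Gem.supported_opts
# (e.g. 'glob') yield (None, None).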
# parse name/version/platform
NAME_VERSION = (
# negative lookahead: not a space
'(?! )'
# a Gem name: several chars are not allowed
'(?P<name>[^ \\)\\(,!:]+)?'
# a space then opening parens (
'(?: \\('
# the version proper which is anything but a dash
'(?P<version>[^-]*)'
# and optionally some non-captured dash followed by anything, once
# pinned version can have this form:
# version-platform
# json (1.8.0-java) alpha (1.9.0-x86-mingw32) and may not contain a !
'(?:-(?P<platform>[^!]*))?'
# closing parens )
'\\)'
# NV is zero or one time
')?')
# parse direct dependencies
DEPS = re.compile(
# two spaces at line start
'^ {2}'
# NV proper
'%(NAME_VERSION)s'
# optional bang pinned
'(?P<pinned>!)?'
'$' % locals()).match
# parse spec-level dependencies
SPEC_DEPS = re.compile(
# four spaces at line start
'^ {4}'
'%(NAME_VERSION)s'
'$' % locals()).match
# parse direct dependencies on spec
SPEC_SUB_DEPS = re.compile(
# six spaces at line start
'^ {6}'
'%(NAME_VERSION)s'
'$' % locals()).match
PLATS = re.compile('^ (?P<platform>.*)$').match
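# Illustrative (hedged) examples of the patterns above: a spec line such as
# '    json (1.8.0-java)' is captured by SPEC_DEPS with name='json',
# version='1.8.0', platform='java'; a DEPENDENCIES line such as
# '  rake (>= 0.8.7)' is captured by DEPS, with an optional trailing '!'
# filling the 'pinned' group.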
class GemfileLockParser(object):
"""
Parse a Gemfile.lock. Code originally derived from Bundler's
/bundler/lib/bundler/lockfile_parser.rb parser
    The parsing uses a simple state machine, switching states based on section
    headings. The result is a tree of Gem objects stored in
    self.dependency_tree.
"""
def __init__(self, lockfile):
self.lockfile = lockfile
# map of a line start string to the next parsing state function
self.STATES = {
DEPENDENCIES: self.parse_dependency,
PLATFORMS: self.parse_platform,
GIT: self.parse_options,
PATH: self.parse_options,
SVN: self.parse_options,
GEM: self.parse_options,
SPECS: self.parse_spec
}
# the final tree of dependencies, keyed by name
self.dependency_tree = {}
# a flat dict of all gems, keyed by name
self.all_gems = {}
self.platforms = []
# init parsing state
self.reset_state()
# parse proper
for line in analysis.unicode_text_lines(lockfile):
line = line.rstrip()
# reset state
if not line:
self.reset_state()
continue
# switch to new state
if line in self.STATES:
if line in GEM_TYPES:
self.current_type = line
self.state = self.STATES[line]
continue
# process state
if self.state:
self.state(line)
# finally refine the collected data
self.refine()
    def reset_state(self):
self.state = None
self.current_options = {}
self.current_gem = None
self.current_type = None
def refine(self):
for gem in self.all_gems.values():
gem.refine()
def get_or_create(self, name, version=None, platform=None):
"""
        Return an existing gem if it exists or create a new one.
Update the all_gems registry.
"""
if name in self.all_gems:
gem = self.all_gems[name]
gem.version = gem.version or version
gem.platform = gem.platform or platform
else:
gem = Gem(name, version, platform)
self.all_gems[name] = gem
return gem
def parse_options(self, line):
key, value = get_option(line)
if key:
self.current_options[key] = value
def parse_spec(self, line):
spec_dep = SPEC_DEPS(line)
if spec_dep:
name = spec_dep.group('name')
# first level dep is always an exact version
version = spec_dep.group('version')
platform = spec_dep.group('platform') or 'ruby'
# always set a new current gem
self.current_gem = self.get_or_create(name, version, platform)
self.current_gem.type = self.current_type
if version:
self.current_gem.version = version
self.current_gem.platform = platform
for k, v in self.current_options.items():
setattr(self.current_gem, k, v)
return
spec_sub_dep = SPEC_SUB_DEPS(line)
if spec_sub_dep:
name = spec_sub_dep.group('name')
if name == 'bundler':
return
# second level dep is always a version constraint
requirements = spec_sub_dep.group('version') or []
if requirements:
requirements = [d.strip() for d in requirements.split(',')]
if name in self.current_gem.dependencies:
dep = self.current_gem.dependencies[name]
else:
dep = self.get_or_create(name)
self.current_gem.dependencies[name] = dep
            # unless set, a sub dep is always a gem
if not dep.type:
dep.type = GEM
for v in requirements:
if v not in dep.requirements:
dep.requirements.append(v)
def parse_dependency(self, line):
deps = DEPS(line)
if not deps:
if TRACE:
logger_debug('ERROR: parse_dependency: '
'line not matched: %(line)r' % locals())
return
name = deps.group('name')
# at this stage ALL gems should already exist except possibly
# for bundler: not finding one is an error
try:
gem = self.all_gems[name]
except KeyError as e:
gem = Gem(name)
self.all_gems[name] = gem
if name != 'bundler' and TRACE:
logger_debug('ERROR: parse_dependency: '
'gem %(name)r does not yet exists in all_gems: '
'%(line)r' % locals())
if name in self.dependency_tree:
if TRACE:
logger_debug('WARNING: parse_dependency: '
                         'dependency %(name)r already declared. '
'line: %(line)r' % locals())
else:
self.dependency_tree[name] = gem
version = deps.group('version') or []
if version:
version = [v.strip() for v in version.split(',')]
# the version of a direct dep is always a constraint
# we append these at the top of the list as this is
# the main constraint
for v in version:
gem.requirements.insert(0, v)
# assert gem.version == version
gem.pinned = True if deps.group('pinned') else False
def parse_platform(self, line):
plat = PLATS(line)
if not plat:
if TRACE:
logger_debug('ERROR: parse_platform: '
'line not matched: %(line)r' % locals())
return
plat = plat.group('platform')
self.platforms.append(plat.strip())
def flatten(self):
"""
Return the Gems dependency_tree as a sorted list of unique
of tuples (parent Gem / child Gem) relationships.
"""
flattened = []
for direct in self.dependency_tree.values():
flattened.append((None, direct,))
flattened.extend(direct.flatten())
return sorted(set(flattened))
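# Hedged usage sketch (not part of the original module): parse a Gemfile.lock
# given on the command line and print each direct dependency with its version
# and the requirement constraints collected for it. The path is assumed to
# point to a readable Gemfile.lock.
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 2:
        parsed = GemfileLockParser(sys.argv[1])
        for dep_name, dep_gem in parsed.dependency_tree.items():
            print(dep_name, dep_gem.version, dep_gem.requirements)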
|
the-stack_0_4764 | #!/usr/bin/env python
import asyncio
import logging
import time
from collections import deque
from typing import List, Dict, Optional, Tuple, Deque
from hummingbot.client.command import __all__ as commands
from hummingbot.client.tab import __all__ as tab_classes
from hummingbot.core.clock import Clock
from hummingbot.exceptions import ArgumentParserError
from hummingbot.logger import HummingbotLogger
from hummingbot.logger.application_warning import ApplicationWarning
from hummingbot.model.sql_connection_manager import SQLConnectionManager
from hummingbot.connector.exchange.paper_trade import create_paper_trade_market
from hummingbot.client.ui.keybindings import load_key_bindings
from hummingbot.client.ui.parser import load_parser, ThrowingArgumentParser
from hummingbot.client.ui.hummingbot_cli import HummingbotCLI
from hummingbot.client.ui.completer import load_completer
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_helpers import (
get_strategy_config_map,
get_connector_class,
get_eth_wallet_private_key,
)
from hummingbot.strategy.strategy_base import StrategyBase
from hummingbot.strategy.cross_exchange_market_making import CrossExchangeMarketPair
from hummingbot.core.utils.kill_switch import KillSwitch
from hummingbot.core.utils.trading_pair_fetcher import TradingPairFetcher
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.notifier.notifier_base import NotifierBase
from hummingbot.notifier.telegram_notifier import TelegramNotifier
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.connector.markets_recorder import MarketsRecorder
from hummingbot.client.config.security import Security
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.client.settings import AllConnectorSettings, ConnectorType
from hummingbot.client.tab.data_types import CommandTab
s_logger = None
class HummingbotApplication(*commands):
KILL_TIMEOUT = 10.0
APP_WARNING_EXPIRY_DURATION = 3600.0
APP_WARNING_STATUS_LIMIT = 6
_main_app: Optional["HummingbotApplication"] = None
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@classmethod
def main_application(cls) -> "HummingbotApplication":
if cls._main_app is None:
cls._main_app = HummingbotApplication()
return cls._main_app
def __init__(self):
# This is to start fetching trading pairs for auto-complete
TradingPairFetcher.get_instance()
self.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
command_tabs = self.init_command_tabs()
self.parser: ThrowingArgumentParser = load_parser(self, command_tabs)
self.app = HummingbotCLI(
input_handler=self._handle_command,
bindings=load_key_bindings(self),
completer=load_completer(self),
command_tabs=command_tabs
)
self.markets: Dict[str, ExchangeBase] = {}
# strategy file name and name get assigned value after import or create command
self._strategy_file_name: str = None
self.strategy_name: str = None
self.strategy_task: Optional[asyncio.Task] = None
self.strategy: Optional[StrategyBase] = None
self.market_pair: Optional[CrossExchangeMarketPair] = None
self.market_trading_pair_tuples: List[MarketTradingPairTuple] = []
self.clock: Optional[Clock] = None
self.market_trading_pairs_map = {}
self.token_list = {}
self.init_time: float = time.time()
self.start_time: Optional[int] = None
self.placeholder_mode = False
self.log_queue_listener: Optional[logging.handlers.QueueListener] = None
self.data_feed: Optional[DataFeedBase] = None
self.notifiers: List[NotifierBase] = []
self.kill_switch: Optional[KillSwitch] = None
self._app_warnings: Deque[ApplicationWarning] = deque()
self._trading_required: bool = True
self._last_started_strategy_file: Optional[str] = None
self.trade_fill_db: Optional[SQLConnectionManager] = None
self.markets_recorder: Optional[MarketsRecorder] = None
self._script_iterator = None
self._binance_connector = None
# gateway variables
self._shared_client = None
@property
def strategy_file_name(self) -> str:
return self._strategy_file_name
@strategy_file_name.setter
def strategy_file_name(self, value: Optional[str]):
self._strategy_file_name = value
if value is not None:
db_name = value.split(".")[0]
self.trade_fill_db = SQLConnectionManager.get_trade_fills_instance(db_name)
else:
self.trade_fill_db = None
@property
def strategy_config_map(self):
if self.strategy_name is not None:
return get_strategy_config_map(self.strategy_name)
return None
def _notify(self, msg: str):
self.app.log(msg)
for notifier in self.notifiers:
notifier.add_msg_to_queue(msg)
def _handle_command(self, raw_command: str):
        # unset the to_stop_config flag if it was triggered before loading any command
if self.app.to_stop_config:
self.app.to_stop_config = False
raw_command = raw_command.lower().strip()
# NOTE: Only done for config command
if raw_command.startswith("config"):
command_split = raw_command.split(maxsplit=2)
else:
command_split = raw_command.split()
try:
if self.placeholder_mode:
pass
else:
# Check if help is requested, if yes, print & terminate
if len(command_split) > 1 and any(arg in ["-h", "--help"] for arg in command_split[1:]):
self.help(command_split[0])
return
shortcuts = global_config_map.get("command_shortcuts").value
shortcut = None
# see if we match against shortcut command
if shortcuts is not None:
for s in shortcuts:
if command_split[0] == s['command']:
shortcut = s
break
# perform shortcut expansion
if shortcut is not None:
# check number of arguments
num_shortcut_args = len(shortcut['arguments'])
if len(command_split) == num_shortcut_args + 1:
# notify each expansion if there's more than 1
verbose = True if len(shortcut['output']) > 1 else False
# do argument replace and re-enter this function with the expanded command
for output_cmd in shortcut['output']:
final_cmd = output_cmd
for i in range(1, num_shortcut_args + 1):
final_cmd = final_cmd.replace(f'${i}', command_split[i])
if verbose is True:
self._notify(f' >>> {final_cmd}')
self._handle_command(final_cmd)
else:
self._notify('Invalid number of arguments for shortcut')
# regular command
else:
args = self.parser.parse_args(args=command_split)
kwargs = vars(args)
if not hasattr(args, "func"):
self.app.handle_tab_command(self, command_split[0], kwargs)
else:
f = args.func
del kwargs["func"]
f(**kwargs)
except ArgumentParserError as e:
if not self.be_silly(raw_command):
self._notify(str(e))
except NotImplementedError:
self._notify("Command not yet implemented. This feature is currently under development.")
except Exception as e:
self.logger().error(e, exc_info=True)
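    # Illustrative (hedged) example of the shortcut expansion in
    # _handle_command() above: a hypothetical shortcut
    # {'command': 'spread', 'arguments': ['BID', 'ASK'],
    #  'output': ['config bid_spread $1', 'config ask_spread $2']}
    # turns the input "spread 1 2" into the two commands
    # "config bid_spread 1" and "config ask_spread 2", each fed back through
    # _handle_command().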
async def _cancel_outstanding_orders(self) -> bool:
success = True
try:
kill_timeout: float = self.KILL_TIMEOUT
self._notify("Cancelling outstanding orders...")
for market_name, market in self.markets.items():
cancellation_results = await market.cancel_all(kill_timeout)
uncancelled = list(filter(lambda cr: cr.success is False, cancellation_results))
if len(uncancelled) > 0:
success = False
uncancelled_order_ids = list(map(lambda cr: cr.order_id, uncancelled))
self._notify("\nFailed to cancel the following orders on %s:\n%s" % (
market_name,
'\n'.join(uncancelled_order_ids)
))
except Exception:
self.logger().error("Error canceling outstanding orders.", exc_info=True)
success = False
if success:
self._notify("All outstanding orders cancelled.")
return success
async def run(self):
await self.app.run()
def add_application_warning(self, app_warning: ApplicationWarning):
self._expire_old_application_warnings()
self._app_warnings.append(app_warning)
def clear_application_warning(self):
self._app_warnings.clear()
@staticmethod
def _initialize_market_assets(market_name: str, trading_pairs: List[str]) -> List[Tuple[str, str]]:
market_trading_pairs: List[Tuple[str, str]] = [(trading_pair.split('-')) for trading_pair in trading_pairs]
return market_trading_pairs
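    # Illustrative (hedged) example: _initialize_market_assets('binance',
    # ['BTC-USDT', 'ETH-USDT']) yields [['BTC', 'USDT'], ['ETH', 'USDT']]
    # (each pair comes from str.split('-'), so it is a list rather than a
    # tuple despite the annotation).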
def _initialize_markets(self, market_names: List[Tuple[str, List[str]]]):
# aggregate trading_pairs if there are duplicate markets
for market_name, trading_pairs in market_names:
if market_name not in self.market_trading_pairs_map:
self.market_trading_pairs_map[market_name] = []
for hb_trading_pair in trading_pairs:
self.market_trading_pairs_map[market_name].append(hb_trading_pair)
for connector_name, trading_pairs in self.market_trading_pairs_map.items():
conn_setting = AllConnectorSettings.get_connector_settings()[connector_name]
if connector_name.endswith("paper_trade") and conn_setting.type == ConnectorType.Exchange:
connector = create_paper_trade_market(conn_setting.parent_name, trading_pairs)
paper_trade_account_balance = global_config_map.get("paper_trade_account_balance").value
for asset, balance in paper_trade_account_balance.items():
connector.set_balance(asset, balance)
else:
Security.update_config_map(global_config_map)
keys = {key: config.value for key, config in global_config_map.items()
if key in conn_setting.config_keys}
init_params = conn_setting.conn_init_parameters(keys)
init_params.update(trading_pairs=trading_pairs, trading_required=self._trading_required)
if conn_setting.use_ethereum_wallet:
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
                    # Todo: Hard coded this exception for now until we figure out how to handle all ethereum connectors.
if connector_name in ["balancer", "uniswap", "uniswap_v3", "perpetual_finance"]:
private_key = get_eth_wallet_private_key()
init_params.update(wallet_private_key=private_key, ethereum_rpc_url=ethereum_rpc_url)
connector_class = get_connector_class(connector_name)
connector = connector_class(**init_params)
self.markets[connector_name] = connector
self.markets_recorder = MarketsRecorder(
self.trade_fill_db,
list(self.markets.values()),
self.strategy_file_name,
self.strategy_name,
)
self.markets_recorder.start()
def _initialize_notifiers(self):
if global_config_map.get("telegram_enabled").value:
# TODO: refactor to use single instance
if not any([isinstance(n, TelegramNotifier) for n in self.notifiers]):
self.notifiers.append(
TelegramNotifier(
token=global_config_map["telegram_token"].value,
chat_id=global_config_map["telegram_chat_id"].value,
hb=self,
)
)
for notifier in self.notifiers:
notifier.start()
def init_command_tabs(self) -> Dict[str, CommandTab]:
"""
        Initiates and returns a CommandTab dictionary with mostly defaults and None values. These values will be
        populated later on by HummingbotCLI.
"""
command_tabs: Dict[str, CommandTab] = {}
for tab_class in tab_classes:
name = tab_class.get_command_name()
command_tabs[name] = CommandTab(name, None, None, None, tab_class)
return command_tabs
|
the-stack_0_4765 | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import numpy as np
import json
import argparse
import sys
import open3d as o3d
sys.path.append("../Utility")
from file import *
from visualization import *
sys.path.append(".")
from initialize_config import *
def list_posegraph_files(folder_posegraph):
pose_graph_paths = get_file_list(folder_posegraph, ".json")
for pose_graph_path in pose_graph_paths:
pose_graph = o3d.io.read_pose_graph(pose_graph_path)
n_nodes = len(pose_graph.nodes)
n_edges = len(pose_graph.edges)
print(
"Fragment o3d.registration.PoseGraph %s has %d nodes and %d edges" %
(pose_graph_path, n_nodes, n_edges))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="visualize pose graph")
parser.add_argument("config", help="path to the config file")
parser.add_argument("--source_id", type=int, help="ID of source fragment")
parser.add_argument("--target_id", type=int, help="ID of target fragment")
parser.add_argument("--adjacent",
help="visualize adjacent pairs",
action="store_true")
parser.add_argument("--all",
help="visualize all pairs",
action="store_true")
parser.add_argument("--list_posegraphs",
help="list number of node and edges of all pose graphs",
action="store_true")
parser.add_argument("--before_optimized",
help="visualize posegraph edges that is not optimized",
action="store_true")
args = parser.parse_args()
with open(args.config) as json_file:
config = json.load(json_file)
initialize_config(config)
ply_file_names = get_file_list(
join(config["path_dataset"], config["folder_fragment"]), ".ply")
if (args.list_posegraphs):
list_posegraph_files(
join(config["path_dataset"], config["folder_fragment"]))
list_posegraph_files(
join(config["path_dataset"], config["folder_scene"]))
if (args.before_optimized):
global_pose_graph_name = join(config["path_dataset"],
config["template_global_posegraph"])
else:
global_pose_graph_name = join(
config["path_dataset"],
config["template_refined_posegraph_optimized"])
print("Reading posegraph")
print(global_pose_graph_name)
pose_graph = o3d.io.read_pose_graph(global_pose_graph_name)
n_nodes = len(pose_graph.nodes)
n_edges = len(pose_graph.edges)
print("Global o3d.registration.PoseGraph having %d nodes and %d edges" % \
(n_nodes, n_edges))
# visualize alignment of posegraph edges
for edge in pose_graph.edges:
print("o3d.registration.PoseGraphEdge %d-%d" % \
(edge.source_node_id, edge.target_node_id))
if ((args.adjacent and \
edge.target_node_id - edge.source_node_id == 1)) or \
(not args.adjacent and
(args.source_id == edge.source_node_id and \
args.target_id == edge.target_node_id)) or \
args.all:
print(" confidence : %.3f" % edge.confidence)
source = o3d.io.read_point_cloud(
ply_file_names[edge.source_node_id])
target = o3d.io.read_point_cloud(
ply_file_names[edge.target_node_id])
source_down = source.voxel_down_sample(config["voxel_size"])
target_down = target.voxel_down_sample(config["voxel_size"])
print("original registration")
draw_registration_result(source_down, target_down,
edge.transformation)
print("optimized registration")
source_down.transform(
pose_graph.nodes[edge.source_node_id].pose)
target_down.transform(
pose_graph.nodes[edge.target_node_id].pose)
draw_registration_result(source_down, target_down,
np.identity(4))
|
the-stack_0_4766 | import cv2
import csv
global clickCoordinates
clickCoordinates = list()
def click_point(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
clickCoordinates.append((x, y))
image = cv2.imread("../protos/textures/rover_circuit.jpg")
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_point)
while True:
cv2.imshow("image", image)
key = cv2.waitKey(1)
if key == ord(" "):
break
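# Illustrative (hedged) example of the output written below: clicking at
# (10, 20) and then (30, 40) before pressing space produces a waypoints.csv
# containing the rows:
#   waypointID,coordinateX,coordinateY
#   0,10,20
#   1,30,40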
with open('waypoints.csv', 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(["waypointID", "coordinateX", "coordinateY"])
for coordinateID, eachClickCoordinatePair in enumerate(clickCoordinates):
writer.writerow([coordinateID, eachClickCoordinatePair[0], eachClickCoordinatePair[1]]) |
the-stack_0_4767 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# This application is an example on how to use aioblescan
#
# Copyright (c) 2017 François Wautier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import sys
import asyncio
import argparse
import re
import aioblescan as aiobs
from aioblescan.plugins import EddyStone
from aioblescan.plugins import RuuviWeather
from aioblescan.plugins import ATCMiThermometer
from aioblescan.plugins import ThermoBeacon
# global
opts = None
def check_mac(val):
try:
if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", val.lower()):
return val.lower()
except:
pass
raise argparse.ArgumentTypeError("%s is not a MAC address" % val)
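# Illustrative (hedged) examples: check_mac('AA:BB:CC:DD:EE:FF') and
# check_mac('aa-bb-cc-dd-ee-ff') both return the lower-cased address, while a
# value such as '123' raises argparse.ArgumentTypeError.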
def my_process(data):
global opts
ev = aiobs.HCI_Event()
xx = ev.decode(data)
if opts.mac:
goon = False
mac = ev.retrieve("peer")
for x in mac:
if x.val in opts.mac:
goon = True
break
if not goon:
return
if opts.raw:
print("Raw data: {}".format(ev.raw_data))
noopt = True
if opts.eddy:
noopt = False
xx = EddyStone().decode(ev)
if xx:
print("Google Beacon {}".format(xx))
return
if opts.ruuvi:
noopt = False
xx = RuuviWeather().decode(ev)
if xx:
print("Weather info {}".format(xx))
return
if opts.atcmi:
noopt = False
xx = ATCMiThermometer().decode(ev)
if xx:
print("Temperature info {}".format(xx))
return
if opts.thermobeacon:
noopt = False
xx = ThermoBeacon().decode(ev)
if xx:
print("Temperature info {}".format(xx))
return
if noopt:
ev.show(0)
def main(args=None):
global opts
parser = argparse.ArgumentParser(description="Track BLE advertised packets")
parser.add_argument(
"-e",
"--eddy",
action="store_true",
default=False,
help="Look specificaly for Eddystone messages.",
)
parser.add_argument(
"-m",
"--mac",
type=check_mac,
action="append",
help="Look for these MAC addresses.",
)
parser.add_argument(
"-r",
"--ruuvi",
action="store_true",
default=False,
help="Look only for Ruuvi tag Weather station messages",
)
parser.add_argument(
"-A",
"--atcmi",
action="store_true",
default=False,
help="Look only for ATC_MiThermometer tag messages",
)
parser.add_argument(
"-T",
"--thermobeacon",
action="store_true",
default=False,
help="Look only for ThermoBeacon messages",
)
parser.add_argument(
"-R",
"--raw",
action="store_true",
default=False,
help="Also show the raw data.",
)
parser.add_argument(
"-a",
"--advertise",
type=int,
default=0,
help="Broadcast like an EddyStone Beacon. Set the interval between packet in millisec",
)
parser.add_argument(
"-u",
"--url",
type=str,
default="",
help="When broadcasting like an EddyStone Beacon, set the url.",
)
parser.add_argument(
"-t",
"--txpower",
type=int,
default=0,
help="When broadcasting like an EddyStone Beacon, set the Tx power",
)
parser.add_argument(
"-D",
"--device",
type=int,
default=0,
help="Select the hciX device to use (default 0, i.e. hci0).",
)
try:
opts = parser.parse_args()
except Exception as e:
parser.error("Error: " + str(e))
sys.exit()
event_loop = asyncio.get_event_loop()
# First create and configure a raw socket
mysocket = aiobs.create_bt_socket(opts.device)
# create a connection with the raw socket
# This used to work but now requires a STREAM socket.
# fac=event_loop.create_connection(aiobs.BLEScanRequester,sock=mysocket)
# Thanks to martensjacobs for this fix
fac = event_loop._create_connection_transport(
mysocket, aiobs.BLEScanRequester, None, None
)
# Start it
conn, btctrl = event_loop.run_until_complete(fac)
# Attach your processing
btctrl.process = my_process
if opts.advertise:
command = aiobs.HCI_Cmd_LE_Advertise(enable=False)
event_loop.run_until_complete(btctrl.send_command(command))
command = aiobs.HCI_Cmd_LE_Set_Advertised_Params(
interval_min=opts.advertise, interval_max=opts.advertise
)
event_loop.run_until_complete(btctrl.send_command(command))
if opts.url:
myeddy = EddyStone(param=opts.url)
else:
myeddy = EddyStone()
if opts.txpower:
myeddy.power = opts.txpower
command = aiobs.HCI_Cmd_LE_Set_Advertised_Msg(msg=myeddy)
event_loop.run_until_complete(btctrl.send_command(command))
command = aiobs.HCI_Cmd_LE_Advertise(enable=True)
event_loop.run_until_complete(btctrl.send_command(command))
# Probe
event_loop.run_until_complete(btctrl.send_scan_request())
try:
# event_loop.run_until_complete(coro)
event_loop.run_forever()
except KeyboardInterrupt:
print("keyboard interrupt")
finally:
print("closing event loop")
event_loop.run_until_complete(btctrl.stop_scan_request())
command = aiobs.HCI_Cmd_LE_Advertise(enable=False)
event_loop.run_until_complete(btctrl.send_command(command))
conn.close()
event_loop.close()
if __name__ == "__main__":
main()
|
the-stack_0_4768 | # PyAudio : Python Bindings for PortAudio.
# Copyright (c) 2006 Hubert Pham
# Copyright (c) 2020 Svein Seldal
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
PyAudio provides Python bindings for PortAudio, the cross-platform
audio I/O library. With PyAudio, you can easily use Python to play and
record audio on a variety of platforms. PyAudio is inspired by:
* pyPortAudio/fastaudio: Python bindings for PortAudio v18 API.
* tkSnack: cross-platform sound toolkit for Tcl/Tk and Python.
.. include:: ../sphinx/examples.rst
Overview
--------
**Classes**
:py:class:`PyAudio`, :py:class:`Stream`
.. only:: pamac
**Host Specific Classes**
:py:class:`PaMacCoreStreamInfo`
**Stream Conversion Convenience Functions**
:py:func:`get_sample_size`, :py:func:`get_format_from_width`
**PortAudio version**
:py:func:`get_portaudio_version`, :py:func:`get_portaudio_version_text`
.. |PaSampleFormat| replace:: :ref:`PortAudio Sample Format <PaSampleFormat>`
.. _PaSampleFormat:
**Portaudio Sample Formats**
:py:data:`paFloat32`, :py:data:`paInt32`, :py:data:`paInt24`,
:py:data:`paInt16`, :py:data:`paInt8`, :py:data:`paUInt8`,
:py:data:`paCustomFormat`
.. |PaHostAPI| replace:: :ref:`PortAudio Host API <PaHostAPI>`
.. _PaHostAPI:
**PortAudio Host APIs**
:py:data:`paInDevelopment`, :py:data:`paDirectSound`, :py:data:`paMME`,
:py:data:`paASIO`, :py:data:`paSoundManager`, :py:data:`paCoreAudio`,
:py:data:`paOSS`, :py:data:`paALSA`, :py:data:`paAL`, :py:data:`paBeOS`,
:py:data:`paWDMKS`, :py:data:`paJACK`, :py:data:`paWASAPI`,
:py:data:`paNoDevice`
.. |PaErrorCode| replace:: :ref:`PortAudio Error Code <PaErrorCode>`
.. _PaErrorCode:
**PortAudio Error Codes**
:py:data:`paNoError`, :py:data:`paNotInitialized`,
:py:data:`paUnanticipatedHostError`, :py:data:`paInvalidChannelCount`,
:py:data:`paInvalidSampleRate`, :py:data:`paInvalidDevice`,
:py:data:`paInvalidFlag`, :py:data:`paSampleFormatNotSupported`,
:py:data:`paBadIODeviceCombination`, :py:data:`paInsufficientMemory`,
:py:data:`paBufferTooBig`, :py:data:`paBufferTooSmall`,
:py:data:`paNullCallback`, :py:data:`paBadStreamPtr`,
:py:data:`paTimedOut`, :py:data:`paInternalError`,
:py:data:`paDeviceUnavailable`,
:py:data:`paIncompatibleHostApiSpecificStreamInfo`,
:py:data:`paStreamIsStopped`, :py:data:`paStreamIsNotStopped`,
:py:data:`paInputOverflowed`, :py:data:`paOutputUnderflowed`,
:py:data:`paHostApiNotFound`, :py:data:`paInvalidHostApi`,
:py:data:`paCanNotReadFromACallbackStream`,
:py:data:`paCanNotWriteToACallbackStream`,
:py:data:`paCanNotReadFromAnOutputOnlyStream`,
:py:data:`paCanNotWriteToAnInputOnlyStream`,
:py:data:`paIncompatibleStreamHostApi`
.. |PaCallbackReturnCodes| replace:: :ref:`PortAudio Callback Return Code <PaCallbackReturnCodes>`
.. _PaCallbackReturnCodes:
**PortAudio Callback Return Codes**
:py:data:`paContinue`, :py:data:`paComplete`, :py:data:`paAbort`
.. |PaCallbackFlags| replace:: :ref:`PortAutio Callback Flag <PaCallbackFlags>`
.. _PaCallbackFlags:
**PortAudio Callback Flags**
:py:data:`paInputUnderflow`, :py:data:`paInputOverflow`,
:py:data:`paOutputUnderflow`, :py:data:`paOutputOverflow`,
:py:data:`paPrimingOutput`
"""
__author__ = "Hubert Pham"
__version__ = "0.2.11"
__docformat__ = "restructuredtext en"
import sys
# attempt to import PortAudio
try:
import _portaudio as pa
except ImportError:
print("Could not import the PyAudio C module '_portaudio'.")
raise
############################################################
# GLOBALS
############################################################
##### PaSampleFormat Sample Formats #####
paFloat32 = pa.paFloat32 #: 32 bit float
paInt32 = pa.paInt32 #: 32 bit int
paInt24 = pa.paInt24 #: 24 bit int
paInt16 = pa.paInt16 #: 16 bit int
paInt8 = pa.paInt8 #: 8 bit int
paUInt8 = pa.paUInt8 #: 8 bit unsigned int
paCustomFormat = pa.paCustomFormat #: a custom data format
###### HostAPI TypeId #####
paInDevelopment = pa.paInDevelopment #: Still in development
paDirectSound = pa.paDirectSound #: DirectSound (Windows only)
paMME = pa.paMME #: Multimedia Extension (Windows only)
paASIO = pa.paASIO #: Steinberg Audio Stream Input/Output
paSoundManager = pa.paSoundManager #: SoundManager (OSX only)
paCoreAudio = pa.paCoreAudio #: CoreAudio (OSX only)
paOSS = pa.paOSS #: Open Sound System (Linux only)
paALSA = pa.paALSA #: Advanced Linux Sound Architecture (Linux only)
paAL = pa.paAL #: Open Audio Library
paBeOS = pa.paBeOS #: BeOS Sound System
paWDMKS = pa.paWDMKS #: Windows Driver Model (Windows only)
paJACK = pa.paJACK #: JACK Audio Connection Kit
paWASAPI = pa.paWASAPI #: Windows Vista Audio stack architecture
paNoDevice = pa.paNoDevice #: Not actually an audio device
###### portaudio error codes #####
paNoError = pa.paNoError
paNotInitialized = pa.paNotInitialized
paUnanticipatedHostError = pa.paUnanticipatedHostError
paInvalidChannelCount = pa.paInvalidChannelCount
paInvalidSampleRate = pa.paInvalidSampleRate
paInvalidDevice = pa.paInvalidDevice
paInvalidFlag = pa.paInvalidFlag
paSampleFormatNotSupported = pa.paSampleFormatNotSupported
paBadIODeviceCombination = pa.paBadIODeviceCombination
paInsufficientMemory = pa.paInsufficientMemory
paBufferTooBig = pa.paBufferTooBig
paBufferTooSmall = pa.paBufferTooSmall
paNullCallback = pa.paNullCallback
paBadStreamPtr = pa.paBadStreamPtr
paTimedOut = pa.paTimedOut
paInternalError = pa.paInternalError
paDeviceUnavailable = pa.paDeviceUnavailable
paIncompatibleHostApiSpecificStreamInfo = pa.paIncompatibleHostApiSpecificStreamInfo
paStreamIsStopped = pa.paStreamIsStopped
paStreamIsNotStopped = pa.paStreamIsNotStopped
paInputOverflowed = pa.paInputOverflowed
paOutputUnderflowed = pa.paOutputUnderflowed
paHostApiNotFound = pa.paHostApiNotFound
paInvalidHostApi = pa.paInvalidHostApi
paCanNotReadFromACallbackStream = pa.paCanNotReadFromACallbackStream
paCanNotWriteToACallbackStream = pa.paCanNotWriteToACallbackStream
paCanNotReadFromAnOutputOnlyStream = pa.paCanNotReadFromAnOutputOnlyStream
paCanNotWriteToAnInputOnlyStream = pa.paCanNotWriteToAnInputOnlyStream
paIncompatibleStreamHostApi = pa.paIncompatibleStreamHostApi
###### portaudio callback return codes ######
paContinue = pa.paContinue #: There is more audio data to come
paComplete = pa.paComplete #: This was the last block of audio data
paAbort = pa.paAbort #: An error occurred, stop playback/recording
###### portaudio callback flags ######
paInputUnderflow = pa.paInputUnderflow #: Buffer underflow in input
paInputOverflow = pa.paInputOverflow #: Buffer overflow in input
paOutputUnderflow = pa.paOutputUnderflow #: Buffer underflow in output
paOutputOverflow = pa.paOutputOverflow #: Buffer overflow in output
paPrimingOutput = pa.paPrimingOutput #: Just priming, not playing yet
############################################################
# Convenience Functions
############################################################
def get_sample_size(format):
"""
Returns the size (in bytes) for the specified
sample *format*.
:param format: A |PaSampleFormat| constant.
:raises ValueError: on invalid specified `format`.
:rtype: integer
"""
return pa.get_sample_size(format)
def get_format_from_width(width, unsigned=True):
"""
Returns a PortAudio format constant for the specified *width*.
:param width: The desired sample width in bytes (1, 2, 3, or 4)
:param unsigned: For 1 byte width, specifies signed or unsigned format.
:raises ValueError: when invalid *width*
:rtype: A |PaSampleFormat| constant
"""
if width == 1:
if unsigned:
return paUInt8
else:
return paInt8
elif width == 2:
return paInt16
elif width == 3:
return paInt24
elif width == 4:
return paFloat32
else:
raise ValueError("Invalid width: %d" % width)
############################################################
# Versioning
############################################################
def get_portaudio_version():
"""
Returns portaudio version.
:rtype: string
"""
return pa.get_version()
def get_portaudio_version_text():
"""
Returns PortAudio version as a text string.
:rtype: string
"""
return pa.get_version_text()
############################################################
# Wrapper around _portaudio Stream (Internal)
############################################################
# Note: See PyAudio class below for main export.
class Stream:
"""
PortAudio Stream Wrapper. Use :py:func:`PyAudio.open` to make a new
:py:class:`Stream`.
**Opening and Closing**
:py:func:`__init__`, :py:func:`close`
**Stream Info**
:py:func:`get_input_latency`, :py:func:`get_output_latency`,
:py:func:`get_time`, :py:func:`get_cpu_load`
**Stream Management**
:py:func:`start_stream`, :py:func:`stop_stream`, :py:func:`is_active`,
:py:func:`is_stopped`
**Input Output**
:py:func:`write`, :py:func:`read`, :py:func:`get_read_available`,
:py:func:`get_write_available`
"""
def __init__(self,
PA_manager,
rate,
input_channels,
output_channels,
format,
input=False,
output=False,
input_device_index=None,
output_device_index=None,
frames_per_buffer=1024,
start=True,
input_host_api_specific_stream_info=None,
output_host_api_specific_stream_info=None,
stream_callback=None):
"""
Initialize a stream; this should be called by
:py:func:`PyAudio.open`. A stream can either be input, output,
or both.
:param PA_manager: A reference to the managing :py:class:`PyAudio`
instance
:param rate: Sampling rate
:param input_channels: Number of input channels
:param output_channels: Number of output channels
:param format: Sampling size and format. See |PaSampleFormat|.
:param input: Specifies whether this is an input stream.
Defaults to ``False``.
:param output: Specifies whether this is an output stream.
Defaults to ``False``.
:param input_device_index: Index of Input Device to use.
Unspecified (or ``None``) uses default device.
Ignored if `input` is ``False``.
:param output_device_index:
Index of Output Device to use.
Unspecified (or ``None``) uses the default device.
Ignored if `output` is ``False``.
:param frames_per_buffer: Specifies the number of frames per buffer.
:param start: Start the stream running immediately.
Defaults to ``True``. In general, there is no reason to set
this to ``False``.
:param input_host_api_specific_stream_info: Specifies a host API
specific stream information data structure for input.
.. only:: pamac
See :py:class:`PaMacCoreStreamInfo`.
:param output_host_api_specific_stream_info: Specifies a host API
specific stream information data structure for output.
.. only:: pamac
See :py:class:`PaMacCoreStreamInfo`.
:param stream_callback: Specifies a callback function for
*non-blocking* (callback) operation. Default is
``None``, which indicates *blocking* operation (i.e.,
:py:func:`Stream.read` and :py:func:`Stream.write`). To use
non-blocking operation, specify a callback that conforms
to the following signature:
.. code-block:: python
callback(in_data, # recorded data if input=True; else None
frame_count, # number of frames
time_info, # dictionary
status_flags) # PaCallbackFlags
``time_info`` is a dictionary with the following keys:
``input_buffer_adc_time``, ``current_time``, and
``output_buffer_dac_time``; see the PortAudio
documentation for their meanings. ``status_flags`` is one
of |PaCallbackFlags|.
The callback must return a tuple:
.. code-block:: python
(out_data, flag)
        ``out_data`` is a byte array whose length should be
(``frame_count * output_channels * bytes-per-channel``) if
``output=True`` or ``None`` if ``output=False``. ``flag``
must be either :py:data:`paContinue`, :py:data:`paComplete` or
:py:data:`paAbort` (one of |PaCallbackReturnCodes|).
When ``output=True`` and ``out_data`` does not contain at
least ``frame_count`` frames, :py:data:`paComplete` is
assumed for ``flag``.
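        A minimal callback sketch for playback (``wf`` is assumed to be
        a ``wave.Wave_read`` object opened elsewhere; this is purely
        illustrative):

        .. code-block:: python

            def callback(in_data, frame_count, time_info, status_flags):
                data = wf.readframes(frame_count)
                return (data, paContinue)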
**Note:** ``stream_callback`` is called in a separate
thread (from the main thread). Exceptions that occur in
the ``stream_callback`` will:
1. print a traceback on standard error to aid debugging,
2. queue the exception to be thrown (at some point) in
the main thread, and
3. return `paAbort` to PortAudio to stop the stream.
**Note:** Do not call :py:func:`Stream.read` or
:py:func:`Stream.write` if using non-blocking operation.
**See:** PortAudio's callback signature for additional
details: http://portaudio.com/docs/v19-doxydocs/portaudio_8h.html#a8a60fb2a5ec9cbade3f54a9c978e2710
        :raise ValueError: Neither input nor output is set to ``True``.
"""
# no stupidity allowed
if not (input or output):
raise ValueError("Must specify an input or output " + "stream.")
# remember parent
self._parent = PA_manager
# remember if we are an: input, output (or both)
self._is_input = input
self._is_output = output
# are we running?
self._is_running = start
# remember some parameters
self._rate = rate
self._input_channels = input_channels
self._output_channels = output_channels
self._format = format
self._frames_per_buffer = frames_per_buffer
arguments = {
'rate' : rate,
'input_channels' : input_channels,
'output_channels' : output_channels,
'format' : format,
'input' : input,
'output' : output,
'input_device_index' : input_device_index,
'output_device_index' : output_device_index,
'frames_per_buffer' : frames_per_buffer}
if input_host_api_specific_stream_info:
_l = input_host_api_specific_stream_info
arguments[
'input_host_api_specific_stream_info'
] = _l._get_host_api_stream_object()
if output_host_api_specific_stream_info:
_l = output_host_api_specific_stream_info
arguments[
'output_host_api_specific_stream_info'
] = _l._get_host_api_stream_object()
if stream_callback:
arguments['stream_callback'] = stream_callback
# calling pa.open returns a stream object
self._stream = pa.open(**arguments)
self._input_latency = self._stream.inputLatency
self._output_latency = self._stream.outputLatency
if self._is_running:
pa.start_stream(self._stream)
def close(self):
""" Close the stream """
pa.close(self._stream)
self._is_running = False
self._parent._remove_stream(self)
############################################################
# Stream Info
############################################################
def get_input_latency(self):
"""
Return the input latency.
:rtype: float
"""
return self._stream.inputLatency
def get_output_latency(self):
"""
Return the output latency.
:rtype: float
"""
return self._stream.outputLatency
def get_time(self):
"""
Return stream time.
:rtype: float
"""
return pa.get_stream_time(self._stream)
def get_cpu_load(self):
"""
Return the CPU load. This is always 0.0 for the
blocking API.
:rtype: float
"""
return pa.get_stream_cpu_load(self._stream)
############################################################
# Stream Management
############################################################
def start_stream(self):
""" Start the stream. """
if self._is_running:
return
pa.start_stream(self._stream)
self._is_running = True
def stop_stream(self):
"""
Stop the stream. Once the stream is stopped, one may not call
write or read. Call :py:func:`start_stream` to resume the
stream.
"""
if not self._is_running:
return
pa.stop_stream(self._stream)
self._is_running = False
def is_active(self):
"""
Returns whether the stream is active.
:rtype: bool
"""
return pa.is_stream_active(self._stream)
def is_stopped(self):
"""
Returns whether the stream is stopped.
:rtype: bool
"""
return pa.is_stream_stopped(self._stream)
############################################################
# Reading/Writing
############################################################
def write(self, frames, num_frames=None,
exception_on_underflow=False):
"""
Write samples to the stream. Do not call when using
*non-blocking* mode.
:param frames:
The frames of data.
:param num_frames:
The number of frames to write.
            Defaults to ``None``, in which case the value will be
            automatically computed.
:param exception_on_underflow:
Specifies whether an IOError exception should be thrown
(or silently ignored) on buffer underflow. Defaults
to False for improved performance, especially on
slower platforms.
:raises IOError: if the stream is not an output stream
or if the write operation was unsuccessful.
:rtype: `None`
"""
if not self._is_output:
raise IOError("Not output stream",
paCanNotWriteToAnInputOnlyStream)
if num_frames == None:
            # determine how many frames to write
width = get_sample_size(self._format)
num_frames = int(len(frames) / (self._output_channels * width))
#print len(frames), self._output_channels, self._width, num_frames
pa.write_stream(self._stream, frames, num_frames,
exception_on_underflow)
def read(self, num_frames, exception_on_overflow=True):
"""
Read samples from the stream. Do not call when using
*non-blocking* mode.
:param num_frames: The number of frames to read.
:param exception_on_overflow:
Specifies whether an IOError exception should be thrown
(or silently ignored) on input buffer overflow. Defaults
to True.
:raises IOError: if stream is not an input stream
or if the read operation was unsuccessful.
:rtype: string
"""
if not self._is_input:
raise IOError("Not input stream",
paCanNotReadFromAnOutputOnlyStream)
return pa.read_stream(self._stream, num_frames, exception_on_overflow)
def get_read_available(self):
"""
Return the number of frames that can be read without waiting.
:rtype: integer
"""
return pa.get_stream_read_available(self._stream)
def get_write_available(self):
"""
Return the number of frames that can be written without
waiting.
:rtype: integer
"""
return pa.get_stream_write_available(self._stream)
############################################################
# Main Export
############################################################
class PyAudio:
"""
Python interface to PortAudio. Provides methods to:
- initialize and terminate PortAudio
- open and close streams
- query and inspect the available PortAudio Host APIs
- query and inspect the available PortAudio audio
devices
Use this class to open and close streams.
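    A minimal blocking-playback sketch (the stream parameters, including
    ``input_channels=0`` for an output-only stream, and the
    ``audio_bytes`` buffer are illustrative assumptions):

    .. code-block:: python

        p = PyAudio()
        stream = p.open(rate=44100, input_channels=0, output_channels=2,
                        format=paInt16, output=True)
        stream.write(audio_bytes)  # interleaved 16-bit PCM frames
        stream.close()
        p.terminate()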
**Stream Management**
:py:func:`open`, :py:func:`close`
**Host API**
:py:func:`get_host_api_count`, :py:func:`get_default_host_api_info`,
:py:func:`get_host_api_info_by_type`,
:py:func:`get_host_api_info_by_index`,
:py:func:`get_device_info_by_host_api_device_index`
**Device API**
:py:func:`get_device_count`, :py:func:`is_format_supported`,
:py:func:`get_default_input_device_info`,
:py:func:`get_default_output_device_info`,
:py:func:`get_device_info_by_index`
**Stream Format Conversion**
:py:func:`get_sample_size`, :py:func:`get_format_from_width`
**Details**
"""
############################################################
# Initialization and Termination
############################################################
def __init__(self):
"""Initialize PortAudio."""
pa.initialize()
self._streams = set()
def terminate(self):
"""
Terminate PortAudio.
:attention: Be sure to call this method for every instance of
this object to release PortAudio resources.
"""
for stream in self._streams.copy():
stream.close()
self._streams = set()
pa.terminate()
############################################################
# Stream Format
############################################################
def get_sample_size(self, format):
"""
Returns the size (in bytes) for the specified
sample `format` (a |PaSampleFormat| constant).
:param format: A |PaSampleFormat| constant.
:raises ValueError: Invalid specified `format`.
:rtype: integer
"""
return pa.get_sample_size(format)
def get_format_from_width(self, width, unsigned=True):
"""
Returns a PortAudio format constant for the specified `width`.
:param width: The desired sample width in bytes (1, 2, 3, or 4)
:param unsigned: For 1 byte width, specifies signed or unsigned format.
:raises ValueError: for invalid `width`
:rtype: A |PaSampleFormat| constant.
"""
if width == 1:
if unsigned:
return paUInt8
else:
return paInt8
elif width == 2:
return paInt16
elif width == 3:
return paInt24
elif width == 4:
return paFloat32
else:
raise ValueError("Invalid width: %d" % width)
############################################################
# Stream Factory
############################################################
def open(self, *args, **kwargs):
"""
Open a new stream. See constructor for
:py:func:`Stream.__init__` for parameter details.
:returns: A new :py:class:`Stream`
"""
stream = Stream(self, *args, **kwargs)
self._streams.add(stream)
return stream
def close(self, stream):
"""
Close a stream. Typically use :py:func:`Stream.close` instead.
:param stream: An instance of the :py:class:`Stream` object.
:raises ValueError: if stream does not exist.
"""
if stream not in self._streams:
raise ValueError("Stream `%s' not found" % str(stream))
stream.close()
def _remove_stream(self, stream):
"""
Internal method. Removes a stream.
:param stream: An instance of the :py:class:`Stream` object.
"""
if stream in self._streams:
self._streams.remove(stream)
############################################################
# Host API Inspection
############################################################
def get_host_api_count(self):
"""
Return the number of available PortAudio Host APIs.
:rtype: integer
"""
return pa.get_host_api_count()
def get_default_host_api_info(self):
"""
Return a dictionary containing the default Host API
parameters. The keys of the dictionary mirror the data fields
of PortAudio's ``PaHostApiInfo`` structure.
        :raises IOError: if no default host API is available
:rtype: dict
"""
defaultHostApiIndex = pa.get_default_host_api()
return self.get_host_api_info_by_index(defaultHostApiIndex)
def get_host_api_info_by_type(self, host_api_type):
"""
Return a dictionary containing the Host API parameters for the
host API specified by the `host_api_type`. The keys of the
dictionary mirror the data fields of PortAudio's ``PaHostApiInfo``
structure.
:param host_api_type: The desired |PaHostAPI|
:raises IOError: for invalid `host_api_type`
:rtype: dict
"""
index = pa.host_api_type_id_to_host_api_index(host_api_type)
return self.get_host_api_info_by_index(index)
def get_host_api_info_by_index(self, host_api_index):
"""
Return a dictionary containing the Host API parameters for the
host API specified by the `host_api_index`. The keys of the
dictionary mirror the data fields of PortAudio's ``PaHostApiInfo``
structure.
:param host_api_index: The host api index
:raises IOError: for invalid `host_api_index`
:rtype: dict
"""
return self._make_host_api_dictionary(
host_api_index,
pa.get_host_api_info(host_api_index)
)
def get_device_info_by_host_api_device_index(self,
host_api_index,
host_api_device_index):
"""
Return a dictionary containing the Device parameters for a
given Host API's n'th device. The keys of the dictionary
mirror the data fields of PortAudio's ``PaDeviceInfo`` structure.
:param host_api_index: The Host API index number
:param host_api_device_index: The n'th device of the host API
:raises IOError: for invalid indices
:rtype: dict
"""
long_method_name = pa.host_api_device_index_to_device_index
device_index = long_method_name(host_api_index,
host_api_device_index)
return self.get_device_info_by_index(device_index)
def _make_host_api_dictionary(self, index, host_api_struct):
"""
Internal method to create Host API dictionary that mirrors
PortAudio's ``PaHostApiInfo`` structure.
:rtype: dict
"""
return {'index' : index,
'structVersion' : host_api_struct.structVersion,
'type' : host_api_struct.type,
'name' : host_api_struct.name,
'deviceCount' : host_api_struct.deviceCount,
'defaultInputDevice' : host_api_struct.defaultInputDevice,
'defaultOutputDevice' : host_api_struct.defaultOutputDevice}
############################################################
# Device Inspection
############################################################
def get_device_count(self):
"""
        Return the number of PortAudio devices.
:rtype: integer
"""
return pa.get_device_count()
def is_format_supported(self, rate,
input_device=None,
input_channels=None,
input_format=None,
input_host_api_specific_stream_info=None,
output_device=None,
output_channels=None,
output_format=None,
output_host_api_specific_stream_info=None):
"""
        Checks whether the specified device configuration is
        supported. Returns True if the configuration is supported;
        raises a ValueError exception otherwise.
:param rate:
Specifies the desired rate (in Hz)
:param input_device:
The input device index. Specify ``None`` (default) for
half-duplex output-only streams.
:param input_channels:
The desired number of input channels. Ignored if
`input_device` is not specified (or ``None``).
:param input_format:
PortAudio sample format constant defined
in this module
:param output_device:
The output device index. Specify ``None`` (default) for
half-duplex input-only streams.
:param output_channels:
The desired number of output channels. Ignored if
            `output_device` is not specified (or ``None``).
:param output_format:
|PaSampleFormat| constant.
:rtype: bool
:raises ValueError: tuple containing (error string, |PaErrorCode|).
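        A usage sketch, assuming ``p`` is a :py:class:`PyAudio` instance
        (the device index and parameter values are illustrative
        assumptions):

        .. code-block:: python

            try:
                p.is_format_supported(44100.0,
                                      output_device=0,
                                      output_channels=2,
                                      output_format=paInt16)
            except ValueError as err:
                print("Configuration not supported:", err)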
"""
if input_device == None and output_device == None:
raise ValueError("must specify stream format for input, " +\
"output, or both", paInvalidDevice);
kwargs = {}
if input_device != None:
kwargs['input_device'] = input_device
kwargs['input_channels'] = input_channels
kwargs['input_format'] = input_format
if input_host_api_specific_stream_info:
kwargs['input_host_api_specific_stream_info'] = (
input_host_api_specific_stream_info._get_host_api_stream_object()
)
if output_device != None:
kwargs['output_device'] = output_device
kwargs['output_channels'] = output_channels
kwargs['output_format'] = output_format
if output_host_api_specific_stream_info:
kwargs['output_host_api_specific_stream_info'] = (
output_host_api_specific_stream_info._get_host_api_stream_object()
)
return pa.is_format_supported(rate, **kwargs)
def get_default_input_device_info(self):
"""
Return the default input Device parameters as a
dictionary. The keys of the dictionary mirror the data fields
of PortAudio's ``PaDeviceInfo`` structure.
:raises IOError: No default input device available.
:rtype: dict
"""
device_index = pa.get_default_input_device()
return self.get_device_info_by_index(device_index)
def get_default_output_device_info(self):
"""
Return the default output Device parameters as a
dictionary. The keys of the dictionary mirror the data fields
of PortAudio's ``PaDeviceInfo`` structure.
:raises IOError: No default output device available.
:rtype: dict
"""
device_index = pa.get_default_output_device()
return self.get_device_info_by_index(device_index)
def get_device_info_by_index(self, device_index):
"""
Return the Device parameters for device specified in
`device_index` as a dictionary. The keys of the dictionary
mirror the data fields of PortAudio's ``PaDeviceInfo``
structure.
:param device_index: The device index
:raises IOError: Invalid `device_index`.
:rtype: dict
"""
return self._make_device_info_dictionary(
device_index,
pa.get_device_info(device_index)
)
def _make_device_info_dictionary(self, index, device_info):
"""
Internal method to create Device Info dictionary that mirrors
PortAudio's ``PaDeviceInfo`` structure.
:rtype: dict
"""
device_name = device_info.name
# Attempt to decode device_name
for codec in ["utf-8", "cp1252"]:
try:
device_name = device_name.decode(codec)
break
except:
pass
# If we fail to decode, we return the raw bytes and let the caller
# deal with the encoding.
return {'index' : index,
'structVersion' : device_info.structVersion,
'name' : device_name,
'hostApi' : device_info.hostApi,
'maxInputChannels' : device_info.maxInputChannels,
'maxOutputChannels' : device_info.maxOutputChannels,
'defaultLowInputLatency' :
device_info.defaultLowInputLatency,
'defaultLowOutputLatency' :
device_info.defaultLowOutputLatency,
'defaultHighInputLatency' :
device_info.defaultHighInputLatency,
'defaultHighOutputLatency' :
device_info.defaultHighOutputLatency,
'defaultSampleRate' :
device_info.defaultSampleRate
}
######################################################################
# Host Specific Stream Info
######################################################################
try:
paMacCoreStreamInfo = pa.paMacCoreStreamInfo
except AttributeError:
pass
else:
class PaMacCoreStreamInfo:
"""
Mac OS X-only: PaMacCoreStreamInfo is a PortAudio Host API
Specific Stream Info data structure for specifying Mac OS
X-only settings. Instantiate this class (if desired) and pass
the instance as the argument in :py:func:`PyAudio.open` to parameters
``input_host_api_specific_stream_info`` or
``output_host_api_specific_stream_info``.
(See :py:func:`Stream.__init__`.)
:note: Mac OS X only.
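        A construction sketch (the flag choice and channel map are
        illustrative assumptions); pass the resulting object to
        :py:func:`PyAudio.open` as
        ``output_host_api_specific_stream_info``:

        .. code-block:: python

            stream_info = PaMacCoreStreamInfo(
                flags=PaMacCoreStreamInfo.paMacCorePlayNice,
                channel_map=(0, 1))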
.. |PaMacCoreFlags| replace:: :ref:`PortAudio Mac Core Flags <PaMacCoreFlags>`
.. _PaMacCoreFlags:
**PortAudio Mac Core Flags**
:py:data:`paMacCoreChangeDeviceParameters`,
:py:data:`paMacCoreFailIfConversionRequired`,
:py:data:`paMacCoreConversionQualityMin`,
:py:data:`paMacCoreConversionQualityMedium`,
:py:data:`paMacCoreConversionQualityLow`,
:py:data:`paMacCoreConversionQualityHigh`,
:py:data:`paMacCoreConversionQualityMax`,
:py:data:`paMacCorePlayNice`,
:py:data:`paMacCorePro`,
:py:data:`paMacCoreMinimizeCPUButPlayNice`,
:py:data:`paMacCoreMinimizeCPU`
**Settings**
:py:func:`get_flags`, :py:func:`get_channel_map`
"""
paMacCoreChangeDeviceParameters = pa.paMacCoreChangeDeviceParameters
paMacCoreFailIfConversionRequired = pa.paMacCoreFailIfConversionRequired
paMacCoreConversionQualityMin = pa.paMacCoreConversionQualityMin
paMacCoreConversionQualityMedium = pa.paMacCoreConversionQualityMedium
paMacCoreConversionQualityLow = pa.paMacCoreConversionQualityLow
paMacCoreConversionQualityHigh = pa.paMacCoreConversionQualityHigh
paMacCoreConversionQualityMax = pa.paMacCoreConversionQualityMax
paMacCorePlayNice = pa.paMacCorePlayNice
paMacCorePro = pa.paMacCorePro
paMacCoreMinimizeCPUButPlayNice = pa.paMacCoreMinimizeCPUButPlayNice
paMacCoreMinimizeCPU = pa.paMacCoreMinimizeCPU
def __init__(self, flags=None, channel_map=None):
"""
Initialize with flags and channel_map. See PortAudio
documentation for more details on these parameters; they are
passed almost verbatim to the PortAudio library.
:param flags: |PaMacCoreFlags| OR'ed together.
See :py:class:`PaMacCoreStreamInfo`.
:param channel_map: An array describing the channel mapping.
See PortAudio documentation for usage.
"""
kwargs = {"flags" : flags,
"channel_map" : channel_map}
if flags == None:
del kwargs["flags"]
if channel_map == None:
del kwargs["channel_map"]
self._paMacCoreStreamInfo = paMacCoreStreamInfo(**kwargs)
def get_flags(self):
"""
Return the flags set at instantiation.
:rtype: integer
"""
return self._paMacCoreStreamInfo.flags
def get_channel_map(self):
"""
Return the channel map set at instantiation.
:rtype: tuple or None
"""
return self._paMacCoreStreamInfo.channel_map
def _get_host_api_stream_object(self):
"""Private method."""
return self._paMacCoreStreamInfo
try:
paWasapiStreamInfo = pa.paWasapiStreamInfo
except AttributeError:
pass
else:
class PaWasapiStreamInfo:
paWinWasapiExclusive = pa.paWinWasapiExclusive
# paWinWasapiRedirectHostProcessor = pa.paWinWasapiRedirectHostProcessor
# paWinWasapiUseChannelMask = pa.paWinWasapiUseChannelMask
paWinWasapiPolling = pa.paWinWasapiPolling
# paWinWasapiThreadPriority = pa.paWinWasapiThreadPriority
paWinWasapiExplicitSampleFormat = pa.paWinWasapiExplicitSampleFormat
paWinWasapiAutoConvert = pa.paWinWasapiAutoConvert
def __init__(self, flags=None):
"""
Initialize with flags. See PortAudio
documentation for more details on these parameters; they are
passed almost verbatim to the PortAudio library.
:param flags: |PaWasapiFlags| OR'ed together.
See :py:class:`PaWasapiStreamInfo`.
"""
kwargs = {"flags" : flags}
if flags == None:
del kwargs["flags"]
self._paWasapiStreamInfo = paWasapiStreamInfo(**kwargs)
def get_flags(self):
"""
Return the flags set at instantiation.
:rtype: integer
"""
return self._paWasapiStreamInfo.flags
def _get_host_api_stream_object(self):
"""Private method."""
return self._paWasapiStreamInfo
|
the-stack_0_4770 | """
File: test.py
Author: Jens Petit
Email: [email protected]
Github: https://github.com/j-petit
Description: Helper functions for parsing and editing mplus model files
"""
def modelParse(filename):
"""Parses the standard mplus model into single lines. Model refers to the
concept defined after the model: keyword in mplus.
    Each line of the model is translated into lines with only one dependency.
    It looks for the line containing "model:" and starts parsing there until the
next empty line.
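    For example (illustrative only), the model line ``y on x1 x2`` is
    expanded into the two lines ``y on x1`` and ``y on x2``.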
Parameters
----------
filename : The mplus model file to parse
Returns
-------
    new_lines : list of strings, each representing a single line of the model
j : the line number where the model stopped
"""
key_words = ['on', 'with']
found_model = False
with open(filename) as fp:
new_lines = []
model_line = 0
for j, line in enumerate(fp):
line = line.strip(None)
if line.lower() == "model:":
found_model = True
continue
if found_model:
if line == "":
model_line = j
break
line = line.rstrip(";")
split_line = line.split(" ")
if (("on" in line or "with" in line) and len(split_line) > 3):
if ("on" in line):
key_word = "on"
else:
key_word = "with"
index = split_line.index(key_word)
if index == 1:
r_list = split_line[2:]
for i in range(len(r_list)):
line = "{} {} {}".format(split_line[0], key_word, r_list[i])
new_lines.append(line)
else:
l_list = split_line[:index]
for i in range(len(l_list)):
line = "{} {} {}".format(l_list[i], key_word, split_line[-1])
new_lines.append(line)
else:
new_lines.append(line)
if not found_model:
raise Exception("No model found in this file")
return new_lines, j
def appendToFile(filename, model):
"""Appends a model to a file.
Parameters
----------
filename : string which specifies the path.
model : mplus model object
"""
with open(filename, 'a') as f:
f.write(model.name + ":\n")
for i, line in enumerate(model.model):
if model.labels:
f.write(line + " (" + model.labels[i] + ");\n")
else:
f.write(line + ";\n")
f.write("\n")
def combineModels(model1, model2, label, same_indices):
"""Combines the labels of two model inplace.
Parameters
----------
model1 : mplus model object
model2 : mplus model object
label : string for the combined model parts
same_indices : list of ints
"""
for i, index in enumerate(same_indices):
model1.labels[index] = label + str(i)
model2.labels[index] = label + str(i)
|
the-stack_0_4773 | """
Starling setup script.
See license in LICENSE.txt.
"""
import setuptools
import os
from starling_sim.version import __version__
# short description of the project
DESC = "Agent-based framework for mobility simulation"
# get long description from README.md
# with open("README.md", "r") as fh:
# LONG_DESC = fh.read()
LONG_DESC = "Long description of the Starling project (TODO)"
# list of classifiers from the PyPI classifiers trove
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: GIS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: CeCILL-B Free Software License Agreement (CECILL-B)"
]
# only specify install_requires if not in RTD environment
if os.getenv("READTHEDOCS") == "True":
INSTALL_REQUIRES = []
else:
with open("requirements.txt") as f:
INSTALL_REQUIRES = [line.strip() for line in f.readlines()]
# call setup
setuptools.setup(
name="starling-sim",
version=__version__,
license="CECILL-B",
author="Tellae",
author_email="[email protected]",
description=DESC,
long_description=LONG_DESC,
long_description_content_type="text/markdown",
url="https://github.com/tellae/starling",
packages=setuptools.find_packages() + ["starling_sim/schemas"],
classifiers=CLASSIFIERS,
python_requires='>=3.6',
install_requires=INSTALL_REQUIRES,
include_package_data=True
)
|
the-stack_0_4775 | """
Instantiate a variation font. Run, eg:
$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.fixedTools import floatToFixedToFloat, otRound, floatToFixed
from fontTools.pens.boundsPen import BoundsPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
from fontTools.varLib import _GetCoordinates, _SetCoordinates
from fontTools.varLib.models import (
supportScalar,
normalizeLocation,
piecewiseLinearMap,
)
from fontTools.varLib.merger import MutatorMerger
from fontTools.varLib.varStore import VarStoreInstancer
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib.iup import iup_delta
import fontTools.subset.cff
import os.path
import logging
log = logging.getLogger("fontTools.varlib.mutator")
# map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest
OS2_WIDTH_CLASS_VALUES = {}
percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
half = (prev + curr) / 2
OS2_WIDTH_CLASS_VALUES[half] = i
def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
pd_blend_lists = ("BlueValues", "OtherBlues", "FamilyBlues",
"FamilyOtherBlues", "StemSnapH",
"StemSnapV")
pd_blend_values = ("BlueScale", "BlueShift",
"BlueFuzz", "StdHW", "StdVW")
for fontDict in topDict.FDArray:
pd = fontDict.Private
vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
for key, value in pd.rawDict.items():
if (key in pd_blend_values) and isinstance(value, list):
delta = interpolateFromDeltas(vsindex, value[1:])
pd.rawDict[key] = otRound(value[0] + delta)
elif (key in pd_blend_lists) and isinstance(value[0], list):
"""If any argument in a BlueValues list is a blend list,
then they all are. The first value of each list is an
absolute value. The delta tuples are calculated from
relative master values, hence we need to append all the
deltas to date to each successive absolute value."""
delta = 0
for i, val_list in enumerate(value):
delta += otRound(interpolateFromDeltas(vsindex,
val_list[1:]))
value[i] = val_list[0] + delta
def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
charstrings = topDict.CharStrings
for gname in glyphOrder:
# Interpolate charstring
charstring = charstrings[gname]
pd = charstring.private
vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
num_regions = pd.getNumRegions(vsindex)
numMasters = num_regions + 1
new_program = []
last_i = 0
for i, token in enumerate(charstring.program):
if token == 'blend':
num_args = charstring.program[i - 1]
""" The stack is now:
..args for following operations
num_args values from the default font
num_args tuples, each with numMasters-1 delta values
num_blend_args
'blend'
"""
argi = i - (num_args*numMasters + 1)
end_args = tuplei = argi + num_args
while argi < end_args:
next_ti = tuplei + num_regions
deltas = charstring.program[tuplei:next_ti]
delta = interpolateFromDeltas(vsindex, deltas)
charstring.program[argi] += otRound(delta)
tuplei = next_ti
argi += 1
new_program.extend(charstring.program[last_i:end_args])
last_i = i + 1
if last_i != 0:
new_program.extend(charstring.program[last_i:])
charstring.program = new_program
def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
"""Unlike TrueType glyphs, neither advance width nor bounding box
info is stored in a CFF2 charstring. The width data exists only in
the hmtx and HVAR tables. Since LSB data cannot be interpolated
reliably from the master LSB values in the hmtx table, we traverse
the charstring to determine the actual bound box. """
charstrings = topDict.CharStrings
boundsPen = BoundsPen(glyphOrder)
hmtx = varfont['hmtx']
hvar_table = None
if 'HVAR' in varfont:
hvar_table = varfont['HVAR'].table
fvar = varfont['fvar']
varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)
for gid, gname in enumerate(glyphOrder):
entry = list(hmtx[gname])
# get width delta.
if hvar_table:
if hvar_table.AdvWidthMap:
width_idx = hvar_table.AdvWidthMap.mapping[gname]
else:
width_idx = gid
width_delta = otRound(varStoreInstancer[width_idx])
else:
width_delta = 0
# get LSB.
boundsPen.init()
charstring = charstrings[gname]
charstring.draw(boundsPen)
if boundsPen.bounds is None:
# Happens with non-marking glyphs
lsb_delta = 0
else:
lsb = boundsPen.bounds[0]
lsb_delta = entry[1] - lsb
if lsb_delta or width_delta:
if width_delta:
entry[0] += width_delta
if lsb_delta:
entry[1] = lsb
hmtx[gname] = tuple(entry)
def instantiateVariableFont(varfont, location, inplace=False):
""" Generate a static instance from a variable TTFont and a dictionary
defining the desired location along the variable font's axes.
The location values must be specified as user-space coordinates, e.g.:
{'wght': 400, 'wdth': 100}
By default, a new TTFont object is returned. If ``inplace`` is True, the
input varfont is modified and reduced to a static font.
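    A usage sketch (file names are hypothetical):

        from fontTools.ttLib import TTFont
        varfont = TTFont("MyFont-VF.ttf")
        instance = instantiateVariableFont(varfont, {"wght": 700, "wdth": 100})
        instance.save("MyFont-Bold.ttf")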
"""
if not inplace:
# make a copy to leave input varfont unmodified
stream = BytesIO()
varfont.save(stream)
stream.seek(0)
varfont = TTFont(stream)
fvar = varfont['fvar']
axes = {a.axisTag:(a.minValue,a.defaultValue,a.maxValue) for a in fvar.axes}
loc = normalizeLocation(location, axes)
if 'avar' in varfont:
maps = varfont['avar'].segments
loc = {k: piecewiseLinearMap(v, maps[k]) for k,v in loc.items()}
# Quantize to F2Dot14, to avoid surprise interpolations.
loc = {k:floatToFixedToFloat(v, 14) for k,v in loc.items()}
# Location is normalized now
log.info("Normalized location: %s", loc)
if 'gvar' in varfont:
log.info("Mutating glyf/gvar tables")
gvar = varfont['gvar']
glyf = varfont['glyf']
# get list of glyph names in gvar sorted by component depth
glyphnames = sorted(
gvar.variations.keys(),
key=lambda name: (
glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
if glyf[name].isComposite() else 0,
name))
for glyphname in glyphnames:
variations = gvar.variations[glyphname]
coordinates,_ = _GetCoordinates(varfont, glyphname)
origCoords, endPts = None, None
for var in variations:
scalar = supportScalar(loc, var.axes)
if not scalar: continue
delta = var.coordinates
if None in delta:
if origCoords is None:
origCoords,control = _GetCoordinates(varfont, glyphname)
endPts = control[1] if control[0] >= 1 else list(range(len(control[1])))
delta = iup_delta(delta, origCoords, endPts)
coordinates += GlyphCoordinates(delta) * scalar
_SetCoordinates(varfont, glyphname, coordinates)
else:
glyf = None
if 'cvar' in varfont:
log.info("Mutating cvt/cvar tables")
cvar = varfont['cvar']
cvt = varfont['cvt ']
deltas = {}
for var in cvar.variations:
scalar = supportScalar(loc, var.axes)
if not scalar: continue
for i, c in enumerate(var.coordinates):
if c is not None:
deltas[i] = deltas.get(i, 0) + scalar * c
for i, delta in deltas.items():
cvt[i] += otRound(delta)
if 'CFF2' in varfont:
log.info("Mutating CFF2 table")
glyphOrder = varfont.getGlyphOrder()
CFF2 = varfont['CFF2']
topDict = CFF2.cff.topDictIndex[0]
vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
interpolateFromDeltas = vsInstancer.interpolateFromDeltas
interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
CFF2.desubroutinize()
interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
del topDict.rawDict['VarStore']
del topDict.VarStore
if 'MVAR' in varfont:
log.info("Mutating MVAR table")
mvar = varfont['MVAR'].table
varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
records = mvar.ValueRecord
for rec in records:
mvarTag = rec.ValueTag
if mvarTag not in MVAR_ENTRIES:
continue
tableTag, itemName = MVAR_ENTRIES[mvarTag]
delta = otRound(varStoreInstancer[rec.VarIdx])
if not delta:
continue
setattr(varfont[tableTag], itemName,
getattr(varfont[tableTag], itemName) + delta)
log.info("Mutating FeatureVariations")
for tableTag in 'GSUB','GPOS':
if not tableTag in varfont:
continue
table = varfont[tableTag].table
if not hasattr(table, 'FeatureVariations'):
continue
variations = table.FeatureVariations
for record in variations.FeatureVariationRecord:
applies = True
for condition in record.ConditionSet.ConditionTable:
if condition.Format == 1:
axisIdx = condition.AxisIndex
axisTag = fvar.axes[axisIdx].axisTag
Min = condition.FilterRangeMinValue
Max = condition.FilterRangeMaxValue
v = loc[axisTag]
if not (Min <= v <= Max):
applies = False
else:
applies = False
if not applies:
break
if applies:
assert record.FeatureTableSubstitution.Version == 0x00010000
for rec in record.FeatureTableSubstitution.SubstitutionRecord:
table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = rec.Feature
break
del table.FeatureVariations
if 'GDEF' in varfont and varfont['GDEF'].table.Version >= 0x00010003:
log.info("Mutating GDEF/GPOS/GSUB tables")
gdef = varfont['GDEF'].table
instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)
merger = MutatorMerger(varfont, loc)
merger.mergeTables(varfont, [varfont], ['GDEF', 'GPOS'])
# Downgrade GDEF.
del gdef.VarStore
gdef.Version = 0x00010002
if gdef.MarkGlyphSetsDef is None:
del gdef.MarkGlyphSetsDef
gdef.Version = 0x00010000
if not (gdef.LigCaretList or
gdef.MarkAttachClassDef or
gdef.GlyphClassDef or
gdef.AttachList or
(gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)):
del varfont['GDEF']
addidef = False
if glyf:
for glyph in glyf.glyphs.values():
if hasattr(glyph, "program"):
instructions = glyph.program.getAssembly()
# If GETVARIATION opcode is used in bytecode of any glyph add IDEF
addidef = any(op.startswith("GETVARIATION") for op in instructions)
if addidef:
break
if addidef:
log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
asm = []
if 'fpgm' in varfont:
fpgm = varfont['fpgm']
asm = fpgm.program.getAssembly()
else:
fpgm = newTable('fpgm')
fpgm.program = ttProgram.Program()
varfont['fpgm'] = fpgm
asm.append("PUSHB[000] 145")
asm.append("IDEF[ ]")
args = [str(len(loc))]
for a in fvar.axes:
args.append(str(floatToFixed(loc[a.axisTag], 14)))
asm.append("NPUSHW[ ] " + ' '.join(args))
asm.append("ENDF[ ]")
fpgm.program.fromAssembly(asm)
# Change maxp attributes as IDEF is added
if 'maxp' in varfont:
maxp = varfont['maxp']
if hasattr(maxp, "maxInstructionDefs"):
maxp.maxInstructionDefs += 1
else:
setattr(maxp, "maxInstructionDefs", 1)
if hasattr(maxp, "maxStackElements"):
maxp.maxStackElements = max(len(loc), maxp.maxStackElements)
else:
setattr(maxp, "maxInstructionDefs", len(loc))
if 'name' in varfont:
log.info("Pruning name table")
exclude = {a.axisNameID for a in fvar.axes}
for i in fvar.instances:
exclude.add(i.subfamilyNameID)
exclude.add(i.postscriptNameID)
varfont['name'].names[:] = [
n for n in varfont['name'].names
if n.nameID not in exclude
]
if "wght" in location and "OS/2" in varfont:
varfont["OS/2"].usWeightClass = otRound(
max(1, min(location["wght"], 1000))
)
if "wdth" in location:
wdth = location["wdth"]
for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
if wdth < percent:
varfont["OS/2"].usWidthClass = widthClass
break
else:
varfont["OS/2"].usWidthClass = 9
if "slnt" in location and "post" in varfont:
varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))
log.info("Removing variable tables")
for tag in ('avar','cvar','fvar','gvar','HVAR','MVAR','VVAR','STAT'):
if tag in varfont:
del varfont[tag]
return varfont
def main(args=None):
from fontTools import configLogger
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.mutator", description="Instantiate a variable font")
parser.add_argument(
"input", metavar="INPUT.ttf", help="Input variable TTF file.")
parser.add_argument(
"locargs", metavar="AXIS=LOC", nargs="*",
help="List of space separated locations. A location consist in "
"the name of a variation axis, followed by '=' and a number. E.g.: "
" wght=700 wdth=80. The default is the location of the base master.")
parser.add_argument(
"-o", "--output", metavar="OUTPUT.ttf", default=None,
help="Output instance TTF file (default: INPUT-instance.ttf).")
logging_group = parser.add_mutually_exclusive_group(required=False)
logging_group.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely.")
logging_group.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off.")
options = parser.parse_args(args)
varfilename = options.input
outfile = (
os.path.splitext(varfilename)[0] + '-instance.ttf'
if not options.output else options.output)
configLogger(level=(
"DEBUG" if options.verbose else
"ERROR" if options.quiet else
"INFO"))
loc = {}
for arg in options.locargs:
try:
tag, val = arg.split('=')
assert len(tag) <= 4
loc[tag.ljust(4)] = float(val)
except (ValueError, AssertionError):
parser.error("invalid location argument format: %r" % arg)
log.info("Location: %s", loc)
log.info("Loading variable font")
varfont = TTFont(varfilename)
instantiateVariableFont(varfont, loc, inplace=True)
log.info("Saving instance font %s", outfile)
varfont.save(outfile)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
sys.exit(main())
import doctest
sys.exit(doctest.testmod().failed)
|
the-stack_0_4776 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from datetime import datetime
from decimal import Decimal
try:
from string import letters
except ImportError:
from string import ascii_letters as letters
from odps.tests.core import TestBase as Base, to_str, tn, pandas_case
from odps import types
from odps.df.backends.frame import ResultFrame
class TestBase(Base):
def _gen_random_bigint(self, value_range=None):
return random.randint(*(value_range or types.bigint._bounds))
def _gen_random_string(self, max_length=15):
gen_letter = lambda: letters[random.randint(0, 51)]
return to_str(''.join([gen_letter() for _ in range(random.randint(1, max_length))]))
def _gen_random_double(self):
return random.uniform(-2**32, 2**32)
def _gen_random_datetime(self):
dt = datetime.fromtimestamp(random.randint(0, int(time.time())))
        if 1986 <= dt.year <= 1992:  # ignore years when daylight saving time is used
return dt.replace(year=1996)
else:
return dt
def _gen_random_boolean(self):
return random.uniform(-1, 1) > 0
def _gen_random_decimal(self):
return Decimal(str(self._gen_random_double()))
def assertListAlmostEqual(self, first, second, **kw):
self.assertEqual(len(first), len(second))
only_float = kw.pop('only_float', True)
for f, s in zip(first, second):
if only_float:
self.assertAlmostEqual(f, s, **kw)
else:
if isinstance(f, float) and isinstance(s, float):
self.assertAlmostEqual(f, s, **kw)
elif isinstance(f, list) and isinstance(s, list):
self.assertListAlmostEqual(f, s, only_float=False, **kw)
else:
self.assertEqual(f, s)
__all__ = ['TestBase', 'to_str', 'tn', 'pandas_case']
|
the-stack_0_4777 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 14:03:22 2019
@author: Rafael Arenhart
"""
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
'''
use '%matplotlib qt5' in the IPython console to open the interactive plot
'''
PI = np.pi
SAMPLES = 1000
theta = np.random.random(SAMPLES) * 2 * PI
polar_rho = np.random.random(SAMPLES) * 2 * PI
uniform_rho = np.arccos(2*np.random.random(SAMPLES) - 1)
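# Drawing rho uniformly (polar_rho) piles points up near the poles; taking
# rho = arccos(u) with u uniform in [-1, 1] makes cos(rho) uniform, which
# yields a uniform area density over the sphere (uniform_rho).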
def polar_to_cartesian(theta, rho, radius=1):
x = np.sin(rho) * np.cos(theta)
y = np.sin(rho) * np.sin(theta)
z = np.cos(rho)
return (x, y, z)
fig = plt.figure()
axs = [fig.add_subplot(121, projection='3d'),
fig.add_subplot(122, projection='3d')]
axs[0].set_axis_off()
axs[0].set_title('Concentrated')
axs[1].set_axis_off()
axs[1].set_title('Distributed')
polar_points = polar_to_cartesian(theta, polar_rho)
#ax.scatter(polar_points[0], polar_points[1], polar_points[2])
#plt.show()
uniform_points = polar_to_cartesian(theta, uniform_rho)
#ax.scatter(uniform_points[0], uniform_points[1], uniform_points[2])
#plt.show()
#fig, axs = plt.subplots(2)
axs[0].scatter(polar_points[0], polar_points[1], polar_points[2])
axs[1].scatter(uniform_points[0], uniform_points[1], uniform_points[2])
plt.show()
|
the-stack_0_4780 | import get_pic_from_video as gp
import directory_tree_traversal
import scor_onlie
import op_log
import ftp_op
import time
import configparser
import os.path
# record the script start time
start_time = time.strftime("%Y-%m-%d %X", time.localtime())
try:
    # read the configuration file
conf = configparser.ConfigParser()
conf.read('arr_config.ini')
    # local directory to process
local_path = conf.get("local", "local_path")
    # file types to filter for
file_type = conf.get("local", "video_type")
video_type = [x for x in file_type.split(', ')]
    # where to store the captured frames
save_picture_path = conf.get("local", "save_picture_path")
    # whether to delete the local file after a successful FTP upload
del_localfile = conf.get("local", "del_file_afterupload")
    # NSFW API endpoint
scor_url = conf.get("nsfw", "nsfw_api")
    # total number of frames to capture
pic_cnt = int(conf.get("nsfw", "pic_cnt"))
    # threshold for judging whether an image is NSFW
threshold = conf.get("nsfw", "threshold")
    # FTP connection info
ip = conf.get("ftp", "ip")
port = conf.get("ftp", "port")
username = conf.get("ftp", "username")
pwd = conf.get("ftp", "pwd")
except Exception as write_err:
op_log.log_to_file("读取配置文件失败. %s" % write_err)
try:
    # FTP configuration for uploading files
ftp_conn = ftp_op.ftpconnect(ip, int(port), username, pwd)
except:
print("ftp 连接失败(%s:%s %s %s)" % (ip, port, username, pwd))
op_log.log_to_file("ftp 连接失败(%s:%s %s %s)" % (ip, port, username, pwd))
exit(1)
try:
    # connect to the database to record the operation log
tb_name = 'video_arrange'
db_name = 'arr_file.db3'
con, cur = op_log.open_sqlite(db_name, tb_name)
    # get the list of local files of the specified types
videos = directory_tree_traversal.file_list(local_path, video_type)
if not videos:
print('get no file')
log_txt = "start at %s , but get no file." % (start_time)
op_log.log_to_file(log_txt)
exit(0)
cnt = 0
for local_file in videos:
cnt += 1
print("-->:handling %s of %s " % (cnt, len(videos)))
        # capture frames from the video file
if not os.path.exists(save_picture_path):
os.makedirs(save_picture_path)
images = gp.get_frame(local_file, save_picture_path, pic_cnt)
if not images:
op_log.log_to_file("%s 获取文件截图失败" % local_file)
continue
        # count the frames whose score exceeds the threshold
scors_cnt = 0
nsfw_flag = 0
for ims in images:
scors = scor_onlie.scor(scor_url, ims)
if float(scors) > float(threshold):
scors_cnt += 1
success = 0
if scors_cnt > 1:
nsfw_flag = 1
            # upload the file to FTP
remote_file = os.path.split(local_file)[-1]
upresult = ftp_op.uploadfile(ftp_conn, local_file, remote_file)
if upresult is True:
                result_txt = local_file + '-- uploaded to FTP successfully'
success = 1
if int(del_localfile) == 1:
os.remove(local_file)
op_log.log_to_file("删除文件:%s" % local_file)
else:
                result_txt = local_file + '-- FTP upload failed: ' + upresult
success = 0
# op_log.log_to_file(result_txt)
txt = "%s| |upfile: %s| | %s" % (str(time.asctime()), local_file, result_txt)
data = (os.path.split(local_file)[-1], local_file, int(nsfw_flag), result_txt, success, time.strftime("%Y-%m-%d %X", time.localtime()), '')
else:
result_txt = "不是NSFW文件,不上传"
data = (os.path.split(local_file)[-1], local_file, int(nsfw_flag), result_txt, success, time.strftime("%Y-%m-%d %X", time.localtime()), '')
op_log.insert_data(con, cur, tb_name, data)
con.commit()
end_time = time.strftime("%Y-%m-%d %X", time.localtime())
log_txt = "complete | start at %s , finish at %s . handle %s files." % (start_time, end_time, cnt)
op_log.log_to_file(log_txt)
except Exception as op_err:
op_log.log_to_file("操作失败: %s" % op_err)
finally:
ftp_conn.close()
cur.close()
con.close()
|
the-stack_0_4782 | from flask_login import login_required
from ui import app
from flask import Response, request
from utils.json_encoder import JSONEncoder
from service.job_service import cancel_job, dao_list_jobs, dao_count_jobs, get_job_dao
__author__ = 'tomas'
# returns all the jobs (and their state)
# @app.route("/api/workspace/<workspace_id>/job", methods=["GET"])
# @login_required
# def get_jobs_api(workspace_id):
# in_doc = get_jobs_by_workspace(workspace_id)
# out_doc = JSONEncoder().encode(in_doc)
# return Response(out_doc, mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/job/<job_id>", methods=["GET"])
@login_required
def get_job_by_id_api(workspace_id, job_id):
job = get_job_dao(job_id)
out_doc = JSONEncoder().encode(job)
return Response(out_doc, mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/job/<job_id>", methods=["DELETE"])
@login_required
def delete_job_api(workspace_id, job_id):
cancel_job(job_id)
return Response("{}", mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/job", methods=['GET'])
@login_required
def get_jobs_api(workspace_id):
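    # Builds a search query from the request's orderBy/search/limit/page
    # parameters, then returns {"count": N, "list": [...]} as JSON.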
search_query = {}
search_query["workspace_id"] = workspace_id
if request.args.get('orderBy') is not None:
order = request.args.get('orderBy')
if order[:1] == "-":
order_by = order[1:]
reverse = -1
else:
order_by = order
reverse = 1
else:
order_by = "score"
reverse = 1
search_query["orderBy"] = order_by
search_query["reverse"] = reverse
search = None
if request.args.get('search') is not None:
search = request.args.get('search')
search_query["search_text"] = search
if request.args.get('limit') is not None:
limit = int(request.args.get('limit'))
search_query["limit"] = limit
else:
search_query["limit"] = 10
limit = 10
begin = 1
if request.args.get('page') is not None:
begin = int(request.args.get('page'))
search_query["begin"] = (begin - 1) * limit
# in_doc = list_workspace(search_query)
# count = dao_count_workspace()
in_doc = dao_list_jobs(search_query)
count = dao_count_jobs(search_query)
results = {}
results["count"] = count
results["list"] = in_doc
out_doc = JSONEncoder().encode(results)
return Response(out_doc, mimetype="application/json")
|
the-stack_0_4788 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
__all__ = ["Move"]
class Move(object):
def tune(self, state, accepted):
pass
def update(self,
old_state,
new_state,
accepted,
subset=None):
"""Update a given subset of the ensemble with an accepted proposal
Args:
            old_state: The original ensemble state, holding the current
                coordinates, log probabilities and blobs.
            new_state: The proposed state for the walkers in the subset.
accepted: A vector of booleans indicating which walkers were
accepted.
subset (Optional): A boolean mask indicating which walkers were
included in the subset. This can be used, for example, when
updating only the primary ensemble in a :class:`RedBlueMove`.
"""
if subset is None:
subset = np.ones(len(old_state.coords), dtype=bool)
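        # m1 masks walkers in the full ensemble; m2 masks the corresponding
        # entries of the proposed (subset-sized) state.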
m1 = subset & accepted
m2 = accepted[subset]
old_state.coords[m1] = new_state.coords[m2]
old_state.log_prob[m1] = new_state.log_prob[m2]
if new_state.blobs is not None:
if old_state.blobs is None:
raise ValueError(
"If you start sampling with a given log_prob, "
"you also need to provide the current list of "
"blobs at that position.")
old_state.blobs[m1] = new_state.blobs[m2]
return old_state
|
the-stack_0_4789 | import pydicom
from pydicom.filereader import read_dicomdir, read_dataset
from pydicom.data import get_testdata_files
from pydicom.errors import InvalidDicomError
from os.path import dirname, join
from pprint import pprint
import matplotlib.pyplot as plt
from pydicom.dataset import Dataset
from pathlib import Path
from plugins.dicom_loader.dicom_record import DicomDir, DicomPatient, DicomStudy, DicomSeries, DicomImage
class DicomLoader:
def __init__(self):
pass
def load_dicom(self, dicom_path: Path):
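        # Build a DicomDir -> DicomPatient -> DicomStudy -> DicomSeries -> DicomImage
        # tree keyed by PatientID, StudyID, SeriesNumber and file name.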
dicom_dir = DicomDir(dicom_path)
file_paths = [file_path for file_path in dicom_path.glob('**/*') if file_path.is_file()]
for file_path in file_paths:
try:
file_dataset = pydicom.dcmread(str(file_path))
except InvalidDicomError as exception:
print('DICOM file loading exception:', exception, '\n\tFile:', file_path)
continue
# print('file_dataset', file_dataset)
if file_dataset.PatientID not in dicom_dir.children:
dicom_dir.children[file_dataset.PatientID] = DicomPatient(file_dataset.PatientID)
patient_record = dicom_dir.children[file_dataset.PatientID]
if file_dataset.StudyID not in patient_record.children:
patient_record.children[file_dataset.StudyID] = DicomStudy(file_dataset.StudyID)
study_record = patient_record.children[file_dataset.StudyID]
if file_dataset.SeriesNumber not in study_record.children:
study_record.children[file_dataset.SeriesNumber] = DicomSeries(file_dataset.SeriesNumber)
series_record = study_record.children[file_dataset.SeriesNumber]
series_record.children[file_path.name] = DicomImage(file_path.name, file_dataset)
# print(dicom_dir)
return dicom_dir
def load_dicom_dir_file(self):
# plt.imshow(dcm_dataset.pixel_array, cmap=plt.cm.bone)
# plt.show()
# fetch the path to the test data
# filepath = get_testdata_files('DICOMDIR')[0]
# filepath = 'D:/Projects/C++/Qt/5/BodySnitches/Builds/BodySnitches/!DicomDatasets/FantasticNine/09-Kydryavcev/2011.12.09/DICOMDIR'
filepath = 'd:/Projects/BodySnitches/Builds/BodySnitches/DicomDatasets/FantasticNine/09-Kydryavcev/2011.12.09/DICOMDIR'
print('Path to the DICOM directory: {}'.format(filepath))
# load the data
dicom_dir = read_dicomdir(filepath)
print('dicom_dir', dicom_dir)
base_dir = dirname(filepath)
print('base_dir', base_dir)
# go through the patient record and print information
print('patient_records type', type(dicom_dir.patient_records))
for patient_record in dicom_dir.patient_records:
print('rrr:', type(patient_record))
if (hasattr(patient_record, 'PatientID') and
hasattr(patient_record, 'PatientName')):
print("Patient: {}: {}".format(patient_record.PatientID,
patient_record.PatientName))
studies = patient_record.children
                # go through each study
for study in studies:
print('sss:', type(study))
print(" " * 4 + "Study {}: {}: {}".format(study.StudyID,
study.StudyDate,
study.StudyDescription))
all_series = study.children
                    # go through each series
for series in all_series:
image_count = len(series.children)
plural = ('', 's')[image_count > 1]
# Write basic series info and image count
# Put N/A in if no Series Description
if 'SeriesDescription' not in series:
series.SeriesDescription = "N/A"
print(" " * 8 + "Series {}: {}: {} ({} image{})".format(
series.SeriesNumber, series.Modality, series.SeriesDescription,
image_count, plural))
# Open and read something from each image, for demonstration
                        # purposes. For a quick overview of DICOMDIR, leave the
# following out
print(" " * 12 + "Reading images...")
image_records = series.children
image_filenames = [join(base_dir, *image_rec.ReferencedFileID)
for image_rec in image_records]
datasets = [pydicom.dcmread(image_filename)
for image_filename in image_filenames]
patient_names = set(ds.PatientName for ds in datasets)
patient_IDs = set(ds.PatientID for ds in datasets)
# List the image filenames
print("\n" + " " * 12 + "Image filenames:")
print(" " * 12, end=' ')
pprint(image_filenames, indent=12)
# Expect all images to have same patient name, id
# Show the set of all names, IDs found (should each have one)
print(" " * 12 + "Patient Names in images..: {}".format(
patient_names))
print(" " * 12 + "Patient IDs in images..: {}".format(
patient_IDs))
|
the-stack_0_4791 | #!/usr/bin/env python2.7
"""
Updates 'gene_id' entries in a GTF file downloaded from UCSC Table Browser
to have gene IDs as values instead of transcript IDs.
Two types annotation sources can be used to replace the 'gene_id' values:
1. Local annotation source. To use this, supply a file name to the
'--local' argument. The file must only have two columns denoting
the transcript - gene ID mapping (the first column contain the
transcript IDs).
2. Remote annotation source (UCSC). To use this, supply the UCSC
database to use (e.g. 'hg19') to the '--db' argument and the annotation
source to the '--annot' argument. Annotation source is either 'ucsc',
'ensembl', 'refseq', 'gencode14', or 'gencode17'.
You can only choose local or remote sources, not both.
Requirements:
* Python == 2.7.x
* MySQLdb >= 1.2.3
* track >= 1.3.0-dev (dev version from github.com/xapple/track)
Copyright (c) 2013 Wibowo Arindrarto <[email protected]>
Copyright (c) 2013 LUMC Sequencing Analysis Support Core <[email protected]>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = ('0', '1', )
__version__ = '.'.join(__version_info__)
__version__ += '-dev' if not RELEASE else ''
import argparse
import os
import warnings
import track
import MySQLdb
# Credentials for connecting to database
CRED = {
'host': 'genome-mysql.cse.ucsc.edu',
'user': 'genome',
}
# Queries that return ('transcript ID', 'gene ID') on various tables
QUERIES = {
'ucsc': 'SELECT knownGene.name, kgXref.geneSymbol FROM ' \
'knownGene, kgXref WHERE knownGene.name = kgID',
'ensembl': 'SELECT name, name2 FROM ensGene',
'refseq': 'SELECT name, name2 FROM refGene',
'gencode17': 'SELECT name, name2 FROM wgEncodeGencodeBasicV17',
'gencode14': 'SELECT name, name2 FROM wgEncodeGencodeBasicV14',
}
def get_ucsc_transcript_gene_mapping(annot_src, db, cred=CRED):
"""Returns the transcript-gene name mapping for an annotation source
from a given database source.
The function does not know whether the annotation source exists within
the given database nor does it do any check before trying connect.
:param annot_src: name of annotation source
:type annot_src: str
:param db: UCSC database name to use
:param cred: database login credentials, must contain entries for at
least 'host' and 'user', defaults to credentials for
public UCSC server
:type cred: dict
:returns: transcript-gene name mapping
:rtype: dict(str: str)
"""
con = MySQLdb.connect(db=db, **cred)
cur = con.cursor()
cur.execute(QUERIES[annot_src])
return {tid: gid for tid, gid in cur.fetchall()}
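# Illustrative sketch (not part of the original script): fetching the RefSeq
# mapping for hg19 from the public UCSC server. Requires network access and
# MySQLdb, so it is shown as a comment only.
#
#     mapping = get_ucsc_transcript_gene_mapping('refseq', 'hg19')
#     some_transcript = next(iter(mapping))
#     print(some_transcript, '->', mapping[some_transcript])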
def get_local_transcript_gene_mapping(fname):
"""Loads a two-column file (transcript ID - gene ID) as a dict.
:param fname: path to file
:type fname: str
:returns: mapping of column 1 and column 2 in the file
:rtype: dict(str: str)
"""
mapping = {}
with open(fname, 'r') as src:
for line in src:
if not line.strip():
break
elif not line:
continue
key, value = line.strip().split()
if key in mapping:
if value != mapping[key]:
raise ValueError("Duplicate transcript ID ({0}) with "
"ambiguous gene ID ({1} vs {2}).".format(
key, value, mapping[key]))
mapping[key] = value
return mapping
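# Illustrative sketch (not part of the original script) of the expected
# two-column mapping file and its use; the file name and IDs are hypothetical:
#
#     $ cat transcript_gene.txt
#     NM_000001    GENE_A
#     NM_000002    GENE_B
#
#     mapping = get_local_transcript_gene_mapping('transcript_gene.txt')
#     mapping['NM_000001']  # -> 'GENE_A'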
def update_gene_id_attr(chrom_recs, mapping):
"""Given an iterator for `track` records, yield `track` records with
updated gene ID.
:param chrom_recs: iterator returning `track` records for one chromosome
:type chrom_recs: iterator
:returns: generator yielding single `track` records
:rtype: (yield) `track.pyrow.SuperRow`
"""
for rec in chrom_recs:
data = list(rec.data)
# gene ID is always at index 7 (first index of attributes)
init_gid = data[7]
try:
map_gid = mapping[init_gid]
except KeyError:
warnings.warn("Transcript ID {0} not found in the given "
"mapping, initial value is left unchanged.".format(
init_gid))
else:
data[7] = map_gid
yield data
def ucsc_geneid_fix(in_gtf, out_gtf, remote=None, local=None):
"""Updates 'gene_id' entries in GTF files downloaded from UCSC
Table Browser to contain gene IDs instead of transcript IDs.
If the output GTF file name already exists, it will be overwritten.
:param in_gtf: path to input GTF file
:type in_gtf: str
:param out_gtf: path to output GTF file
:type out_gtf: str
:param remote: UCSC database and annotation source to use
:type remote: dict('db': str, 'annot_src': str)
:param local: two-column file name containing transcript-gene mapping,
only when `db` and `annot_src` are None
:type local: str
:returns: None
"""
# remote not defined
if remote is None:
# then local must be defined
if local is None:
raise ValueError("Missing `remote` or `local` arguments")
mapping = get_local_transcript_gene_mapping(local)
# remote defined
else:
# then local can not be defined
if local is not None:
raise ValueError("Only supply `remote` or `local` argument, "
"not both.")
# remote must have 'db'
if 'db' not in remote:
raise ValueError("Missing remote database name")
# and 'annot_src'
if 'annot' not in remote:
raise ValueError("Missing remote annotation source name")
db = remote['db']
annot = remote['annot']
if annot not in QUERIES.keys():
raise ValueError("Invalid annotation source "
"name: {0}".format(annot))
mapping = get_ucsc_transcript_gene_mapping(annot, db, cred=CRED)
# remove output file if it exists
if os.path.exists(out_gtf):
os.remove(out_gtf)
with track.load(in_gtf, readonly=True) as in_track, \
track.new(out_gtf, format='gtf') as out_track:
# since GTF has custom fields, need to set the out_track to use
# in_track's fields
out_track.fields = in_track.fields
for chrom in in_track.chromosomes:
chrom_rec = in_track.read(chrom)
out_track.write(chrom, update_gene_id_attr(chrom_rec, mapping))
if __name__ == '__main__':
usage = __doc__.split('\n\n\n')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage[0], epilog=usage[1])
parser.add_argument('input', type=str, help='Path to input GTF file')
parser.add_argument('output', type=str, help='Path to output GTF file')
parser.add_argument('--local', type=str, dest='local', default=None,
help='Path to transcript ID - gene ID mapping file')
parser.add_argument('--db', type=str, dest='db', default=None,
help='UCSC database name to use')
parser.add_argument('--annot', type=str, dest='annot', default=None,
choices=QUERIES.keys(), help='UCSC annotation source')
parser.add_argument('--version', action='version', version='%(prog)s ' +
__version__)
args = parser.parse_args()
remote = None
if args.db is not None or args.annot is not None:
remote = {'db': args.db, 'annot': args.annot}
ucsc_geneid_fix(args.input, args.output, remote=remote, local=args.local)
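    # Example invocations (illustrative; the script and file names are
    # hypothetical):
    #
    #     python fix_gtf_gene_ids.py in.gtf out.gtf --db hg19 --annot refseq
    #     python fix_gtf_gene_ids.py in.gtf out.gtf --local transcript_gene.txt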
|
the-stack_0_4792 | from typing import Optional
import tensorflow as tf
from kerastuner.applications import resnet
from kerastuner.applications import xception
from tensorflow.keras import layers
from tensorflow.python.util import nest
from autokeras.blocks import reduction
from autokeras.engine import block as block_module
from autokeras.utils import layer_utils
from autokeras.utils import utils
def set_hp_value(hp, name, value):
full_name = hp._get_name(name)
hp.values[full_name] = value or hp.values[full_name]
class DenseBlock(block_module.Block):
"""Block for Dense layers.
# Arguments
num_layers: Int. The number of Dense layers in the block.
If left unspecified, it will be tuned automatically.
        use_batchnorm: Boolean. Whether to use BatchNormalization layers.
If left unspecified, it will be tuned automatically.
dropout_rate: Float. The dropout rate for the layers.
If left unspecified, it will be tuned automatically.
"""
def __init__(self,
num_layers: Optional[int] = None,
use_batchnorm: Optional[bool] = None,
dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.num_layers = num_layers
self.use_batchnorm = use_batchnorm
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'num_layers': self.num_layers,
'use_batchnorm': self.use_batchnorm,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
output_node = reduction.Flatten().build(hp, output_node)
num_layers = self.num_layers or hp.Choice('num_layers', [1, 2, 3], default=2)
use_batchnorm = self.use_batchnorm
if use_batchnorm is None:
use_batchnorm = hp.Boolean('use_batchnorm', default=False)
if self.dropout_rate is not None:
dropout_rate = self.dropout_rate
else:
dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0)
for i in range(num_layers):
units = hp.Choice(
'units_{i}'.format(i=i),
[16, 32, 64, 128, 256, 512, 1024],
default=32)
output_node = layers.Dense(units)(output_node)
if use_batchnorm:
output_node = layers.BatchNormalization()(output_node)
output_node = layers.ReLU()(output_node)
if dropout_rate > 0:
output_node = layers.Dropout(dropout_rate)(output_node)
return output_node
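# Minimal usage sketch (not part of the original file; assumes the usual
# AutoKeras functional API of this version):
#
#     import autokeras as ak
#     input_node = ak.StructuredDataInput()
#     output_node = DenseBlock(num_layers=2, dropout_rate=0.25)(input_node)
#     output_node = ak.RegressionHead()(output_node)
#     auto_model = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=3)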
class RNNBlock(block_module.Block):
"""An RNN Block.
# Arguments
return_sequences: Boolean. Whether to return the last output in the
output sequence, or the full sequence. Defaults to False.
bidirectional: Boolean. Bidirectional RNN. If left unspecified, it will be
tuned automatically.
num_layers: Int. The number of layers in RNN. If left unspecified, it will
be tuned automatically.
layer_type: String. 'gru' or 'lstm'. If left unspecified, it will be tuned
automatically.
"""
def __init__(self,
return_sequences: bool = False,
bidirectional: Optional[bool] = None,
num_layers: Optional[int] = None,
layer_type: Optional[int] = None,
**kwargs):
super().__init__(**kwargs)
self.return_sequences = return_sequences
self.bidirectional = bidirectional
self.num_layers = num_layers
self.layer_type = layer_type
def get_config(self):
config = super().get_config()
config.update({
'return_sequences': self.return_sequences,
'bidirectional': self.bidirectional,
'num_layers': self.num_layers,
'layer_type': self.layer_type})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
shape = input_node.shape.as_list()
if len(shape) != 3:
raise ValueError(
'Expect the input tensor to have '
'at least 3 dimensions for rnn models, '
'but got {shape}'.format(shape=input_node.shape))
feature_size = shape[-1]
output_node = input_node
bidirectional = self.bidirectional
if bidirectional is None:
bidirectional = hp.Boolean('bidirectional', default=True)
layer_type = self.layer_type or hp.Choice('layer_type',
['gru', 'lstm'],
default='lstm')
num_layers = self.num_layers or hp.Choice('num_layers',
[1, 2, 3],
default=2)
rnn_layers = {
'gru': layers.GRU,
'lstm': layers.LSTM
}
in_layer = rnn_layers[layer_type]
for i in range(num_layers):
return_sequences = True
if i == num_layers - 1:
return_sequences = self.return_sequences
if bidirectional:
output_node = layers.Bidirectional(
in_layer(feature_size,
return_sequences=return_sequences))(output_node)
else:
output_node = in_layer(
feature_size,
return_sequences=return_sequences)(output_node)
return output_node
class ConvBlock(block_module.Block):
"""Block for vanilla ConvNets.
# Arguments
kernel_size: Int. If left unspecified, it will be tuned automatically.
num_blocks: Int. The number of conv blocks, each of which may contain
convolutional, max pooling, dropout, and activation. If left unspecified,
it will be tuned automatically.
num_layers: Int. The number of convolutional layers in each block. If left
unspecified, it will be tuned automatically.
max_pooling: Boolean. Whether to use max pooling layer in each block. If left
unspecified, it will be tuned automatically.
separable: Boolean. Whether to use separable conv layers.
If left unspecified, it will be tuned automatically.
dropout_rate: Float. Between 0 and 1. The dropout rate for after the
convolutional layers. If left unspecified, it will be tuned
automatically.
"""
def __init__(self,
kernel_size: Optional[int] = None,
num_blocks: Optional[int] = None,
num_layers: Optional[int] = None,
max_pooling: Optional[bool] = None,
separable: Optional[bool] = None,
dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.kernel_size = kernel_size
self.num_blocks = num_blocks
self.num_layers = num_layers
self.max_pooling = max_pooling
self.separable = separable
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'kernel_size': self.kernel_size,
'num_blocks': self.num_blocks,
'num_layers': self.num_layers,
'max_pooling': self.max_pooling,
'separable': self.separable,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
kernel_size = self.kernel_size or hp.Choice('kernel_size',
[3, 5, 7],
default=3)
num_blocks = self.num_blocks or hp.Choice('num_blocks',
[1, 2, 3],
default=2)
num_layers = self.num_layers or hp.Choice('num_layers',
[1, 2],
default=2)
separable = self.separable
if separable is None:
separable = hp.Boolean('separable', default=False)
if separable:
conv = layer_utils.get_sep_conv(input_node.shape)
else:
conv = layer_utils.get_conv(input_node.shape)
max_pooling = self.max_pooling
if max_pooling is None:
max_pooling = hp.Boolean('max_pooling', default=True)
pool = layer_utils.get_max_pooling(input_node.shape)
if self.dropout_rate is not None:
dropout_rate = self.dropout_rate
else:
dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0)
for i in range(num_blocks):
for j in range(num_layers):
output_node = conv(
hp.Choice('filters_{i}_{j}'.format(i=i, j=j),
[16, 32, 64, 128, 256, 512],
default=32),
kernel_size,
padding=self._get_padding(kernel_size, output_node),
activation='relu')(output_node)
if max_pooling:
output_node = pool(
kernel_size - 1,
padding=self._get_padding(kernel_size - 1,
output_node))(output_node)
if dropout_rate > 0:
output_node = layers.Dropout(dropout_rate)(output_node)
return output_node
@staticmethod
def _get_padding(kernel_size, output_node):
if all([kernel_size * 2 <= length
for length in output_node.shape[1:-1]]):
return 'valid'
return 'same'
class MultiHeadSelfAttention(block_module.Block):
"""Block for Multi-Head Self-Attention.
# Arguments
head_size: Int. Dimensionality of the `query`, `key` and `value` tensors
after the linear transformation. If left unspecified, it will be
tuned automatically.
        num_heads: Int. The number of attention heads. Defaults to 8.
"""
def __init__(self,
head_size: Optional[int] = None,
num_heads: Optional[int] = 8,
**kwargs):
super().__init__(**kwargs)
self.head_size = head_size
self.num_heads = num_heads
def get_config(self):
config = super().get_config()
config.update({
'head_size': self.head_size,
'num_heads': self.num_heads})
return config
def build(self, hp, inputs=None):
"""
# Arguments
hp: HyperParameters. The hyperparameters for building the model.
inputs: Tensor of Shape [batch_size, seq_len, embedding_dim]
# Returns
Self-Attention outputs of shape `[batch_size, seq_len, embedding_dim]`.
"""
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
shape = input_node.shape.as_list()
if len(shape) != 3:
raise ValueError(
'Expect the input tensor to have '
'3 dimensions for multi-head self-attention, '
'but got {shape}'.format(shape=input_node.shape))
# input.shape = [batch_size, seq_len, embedding_dim]
head_size = self.head_size or hp.Choice(
'head_size',
[32, 64, 128, 256, 512],
default=128)
num_heads = self.num_heads
if num_heads is None:
num_heads = 8
if head_size % num_heads != 0: # how to evaluate this condition
raise ValueError(
f"embedding dimension = {head_size} should be "
f"divisible by number of heads = {num_heads}"
)
projection_dim = head_size // num_heads
query_dense = layers.Dense(head_size)
key_dense = layers.Dense(head_size)
value_dense = layers.Dense(head_size)
combine_heads = layers.Dense(head_size)
batch_size = tf.shape(input_node)[0]
query = query_dense(input_node) # (batch_size, seq_len, head_size)
key = key_dense(input_node) # (batch_size, seq_len, head_size)
value = value_dense(input_node) # (batch_size, seq_len, head_size)
query, key, value = [self.separate_heads(
var, batch_size, num_heads, projection_dim
) for var in [query, key, value]]
attention, weights = self.attention(query, key, value)
attention = tf.transpose(
attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len, num_heads, projection_dim)
concat_attention = tf.reshape(
            attention, (batch_size, tf.shape(attention)[1], head_size)
) # (batch_size, seq_len, head_size)
output = combine_heads(
concat_attention
) # (batch_size, seq_len, head_size)
return output
@staticmethod
def attention(query, key, value):
score = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(dim_key)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
@staticmethod
def separate_heads(x, batch_size, num_heads, projection_dim):
x = tf.reshape(x, (batch_size, -1, num_heads, projection_dim))
return tf.transpose(x, perm=[0, 2, 1, 3])
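# attention() above implements standard scaled dot-product attention,
# softmax(Q @ K^T / sqrt(d_k)) @ V, applied independently per head after
# separate_heads() reshapes the projections to
# (batch_size, num_heads, seq_len, projection_dim).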
class Transformer(block_module.Block):
"""Block for Transformer.
The input should be tokenized sequences with the same length, where each element
of a sequence should be the index of the word.
# Example
```python
# Using the Transformer Block with AutoModel.
import autokeras as ak
from tensorflow.keras import losses
text_input = ak.TextInput()
output_node = ak.TextToIntSequence(output_sequence_length=200)(text_input)
output_node = ak.Transformer(embedding_dim=32,
pretraining='none',
num_heads=2,
dense_dim=32,
dropout_rate = 0.25)(output_node)
output_node = ak.SpatialReduction(reduction_type='global_avg')(output_node)
output_node = ak.DenseBlock(num_layers=1, use_batchnorm = False)(output_node)
output_node = ak.ClassificationHead(
loss=losses.SparseCategoricalCrossentropy(),
dropout_rate = 0.25)(output_node)
clf = ak.AutoModel(inputs=text_input, outputs=output_node, max_trials=2)
```
# Arguments
max_features: Int. Size of the vocabulary. Must be set if not using
TextToIntSequence before this block. Defaults to 20001.
pretraining: String. 'random' (use random weights instead any pretrained
model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word embedding.
If left unspecified, it will be tuned automatically.
embedding_dim: Int. Output dimension of the Attention block.
If left unspecified, it will be tuned automatically.
num_heads: Int. The number of attention heads. If left unspecified,
it will be tuned automatically.
dense_dim: Int. The output dimension of the Feed-Forward Network. If left
unspecified, it will be tuned automatically.
dropout_rate: Float. Between 0 and 1. If left unspecified, it will be
tuned automatically.
"""
def __init__(self,
max_features: int = 20001,
pretraining: Optional[str] = None,
embedding_dim: Optional[int] = None,
num_heads: Optional[int] = None,
dense_dim: Optional[int] = None,
dropout_rate: Optional[int] = None,
**kwargs):
super().__init__(**kwargs)
self.max_features = max_features
self.pretraining = pretraining
self.embedding_dim = embedding_dim
self.num_heads = num_heads
        self.dense_dim = dense_dim
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'max_features': self.max_features,
'pretraining': self.pretraining,
'embedding_dim': self.embedding_dim,
'num_heads': self.num_heads,
'dense_dim': self.dense_dim,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
"""
# Arguments
hp: HyperParameters. The hyperparameters for building the model.
inputs: Tensor of Shape [batch_size, seq_len]
# Returns
Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
"""
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
pretraining = self.pretraining or hp.Choice(
'pretraining',
['random', 'glove', 'fasttext', 'word2vec', 'none'],
default='none')
embedding_dim = self.embedding_dim or hp.Choice(
'embedding_dim',
[32, 64, 128, 256, 512],
default=128)
num_heads = self.num_heads or hp.Choice('num_heads', [8, 16, 32], default=8)
dense_dim = self.dense_dim or hp.Choice('dense_dim',
[128, 256, 512, 1024, 2048],
default=2048)
dropout_rate = self.dropout_rate or hp.Choice('dropout_rate',
[0.0, 0.25, 0.5],
default=0)
ffn = tf.keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embedding_dim), ]
)
layernorm1 = layers.LayerNormalization(epsilon=1e-6)
layernorm2 = layers.LayerNormalization(epsilon=1e-6)
dropout1 = layers.Dropout(dropout_rate)
dropout2 = layers.Dropout(dropout_rate)
# Token and Position Embeddings
input_node = nest.flatten(inputs)[0]
token_embedding = Embedding(max_features=self.max_features,
pretraining=pretraining,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate).build(hp, input_node)
maxlen = input_node.shape[-1]
batch_size = tf.shape(input_node)[0]
positions = self.pos_array_funct(maxlen, batch_size)
position_embedding = Embedding(max_features=maxlen,
pretraining=pretraining,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate).build(hp,
positions)
output_node = tf.keras.layers.Add()([token_embedding,
position_embedding])
attn_output = MultiHeadSelfAttention(
embedding_dim, num_heads).build(hp, output_node)
attn_output = dropout1(attn_output)
add_inputs_1 = tf.keras.layers.Add()([output_node, attn_output])
out1 = layernorm1(add_inputs_1)
ffn_output = ffn(out1)
ffn_output = dropout2(ffn_output)
add_inputs_2 = tf.keras.layers.Add()([out1, ffn_output])
output = layernorm2(add_inputs_2)
return output
@staticmethod
def pos_array_funct(maxlen, batch_size):
pos_ones = tf.ones((batch_size, 1), dtype=tf.int32)
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = tf.expand_dims(positions, 0)
positions = tf.matmul(pos_ones, positions)
return positions
class ResNetBlock(resnet.HyperResNet, block_module.Block):
"""Block for ResNet.
# Arguments
version: String. 'v1', 'v2' or 'next'. The type of ResNet to use.
If left unspecified, it will be tuned automatically.
pooling: String. 'avg', 'max'. The type of pooling layer to use.
If left unspecified, it will be tuned automatically.
"""
def __init__(self,
version: Optional[str] = None,
pooling: Optional[str] = None,
**kwargs):
if 'include_top' in kwargs:
raise ValueError(
'Argument "include_top" is not supported in ResNetBlock.')
if 'input_shape' in kwargs:
raise ValueError(
'Argument "input_shape" is not supported in ResNetBlock.')
super().__init__(include_top=False, input_shape=(10,), **kwargs)
self.version = version
self.pooling = pooling
def get_config(self):
config = super().get_config()
config.update({
'version': self.version,
'pooling': self.pooling})
return config
def build(self, hp, inputs=None):
self.input_tensor = nest.flatten(inputs)[0]
self.input_shape = None
hp.Choice('version', ['v1', 'v2', 'next'], default='v2')
hp.Choice('pooling', ['avg', 'max'], default='avg')
set_hp_value(hp, 'version', self.version)
set_hp_value(hp, 'pooling', self.pooling)
model = super().build(hp)
return model.outputs
class XceptionBlock(xception.HyperXception, block_module.Block):
"""XceptionBlock.
An Xception structure, used for specifying your model with specific datasets.
The original Xception architecture is from https://arxiv.org/abs/1610.02357.
The data first goes through the entry flow, then through the middle flow which
is repeated eight times, and finally through the exit flow.
This XceptionBlock returns a similar architecture as Xception except without
the last (optional) fully connected layer(s) and logistic regression.
The size of this architecture could be decided by `HyperParameters`, to get an
architecture with a half, an identical, or a double size of the original one.
# Arguments
activation: String. 'selu' or 'relu'. If left unspecified, it will be tuned
automatically.
initial_strides: Int. If left unspecified, it will be tuned automatically.
num_residual_blocks: Int. If left unspecified, it will be tuned
automatically.
pooling: String. 'ave', 'flatten', or 'max'. If left unspecified, it will be
tuned automatically.
"""
def __init__(self,
activation: Optional[str] = None,
initial_strides: Optional[int] = None,
num_residual_blocks: Optional[int] = None,
pooling: Optional[str] = None,
**kwargs):
if 'include_top' in kwargs:
raise ValueError(
'Argument "include_top" is not supported in XceptionBlock.')
if 'input_shape' in kwargs:
raise ValueError(
'Argument "input_shape" is not supported in XceptionBlock.')
super().__init__(include_top=False, input_shape=(10,), **kwargs)
self.activation = activation
self.initial_strides = initial_strides
self.num_residual_blocks = num_residual_blocks
self.pooling = pooling
def get_config(self):
config = super().get_config()
config.update({
'classes': self.classes,
'activation': self.activation,
'initial_strides': self.initial_strides,
'num_residual_blocks': self.num_residual_blocks,
'pooling': self.pooling})
return config
def build(self, hp, inputs=None):
self.input_tensor = nest.flatten(inputs)[0]
self.input_shape = None
hp.Choice('activation', ['relu', 'selu'])
hp.Choice('initial_strides', [2])
hp.Int('num_residual_blocks', 2, 8, default=4)
hp.Choice('pooling', ['avg', 'flatten', 'max'])
set_hp_value(hp, 'activation', self.activation)
set_hp_value(hp, 'initial_strides', self.initial_strides)
set_hp_value(hp, 'num_residual_blocks', self.num_residual_blocks)
set_hp_value(hp, 'pooling', self.pooling)
model = super().build(hp)
return model.outputs
class Embedding(block_module.Block):
"""Word embedding block for sequences.
The input should be tokenized sequences with the same length, where each element
of a sequence should be the index of the word.
# Arguments
max_features: Int. Size of the vocabulary. Must be set if not using
TextToIntSequence before this block. Defaults to 20001.
pretraining: String. 'random' (use random weights instead any pretrained
model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word embedding.
If left unspecified, it will be tuned automatically.
embedding_dim: Int. If left unspecified, it will be tuned automatically.
dropout_rate: Float. The dropout rate for after the Embedding layer.
If left unspecified, it will be tuned automatically.
"""
def __init__(self,
max_features: int = 20001,
pretraining: Optional[str] = None,
embedding_dim: Optional[int] = None,
dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.max_features = max_features
self.pretraining = pretraining
self.embedding_dim = embedding_dim
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'max_features': self.max_features,
'pretraining': self.pretraining,
'embedding_dim': self.embedding_dim,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
# TODO: support more pretrained embedding layers.
# glove, fasttext, and word2vec
pretraining = self.pretraining or hp.Choice(
'pretraining',
['random', 'glove', 'fasttext', 'word2vec', 'none'],
default='none')
embedding_dim = self.embedding_dim or hp.Choice(
'embedding_dim',
[32, 64, 128, 256, 512],
default=128)
if pretraining != 'none':
# TODO: load from pretrained weights
layer = layers.Embedding(
input_dim=self.max_features,
output_dim=embedding_dim,
input_length=input_node.shape[1])
# trainable=False,
# weights=[embedding_matrix])
else:
layer = layers.Embedding(
input_dim=self.max_features,
output_dim=embedding_dim)
# input_length=input_node.shape[1],
# trainable=True)
output_node = layer(input_node)
if self.dropout_rate is not None:
dropout_rate = self.dropout_rate
else:
dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0.25)
if dropout_rate > 0:
output_node = layers.Dropout(dropout_rate)(output_node)
return output_node
|
the-stack_0_4793 | import glob
import itertools as it
import numpy as np
import os
import sys
import xgboost as xgb
try:
from sklearn import datasets
from sklearn.preprocessing import scale
except ImportError:
    pass
class Dataset:
def __init__(self, name, get_dataset, objective, metric,
has_weights=False, use_external_memory=False):
self.name = name
self.objective = objective
self.metric = metric
if has_weights:
self.X, self.y, self.w = get_dataset()
else:
self.X, self.y = get_dataset()
self.w = None
self.use_external_memory = use_external_memory
def get_boston():
data = datasets.load_boston()
return data.data, data.target
def get_digits():
data = datasets.load_digits()
return data.data, data.target
def get_cancer():
data = datasets.load_breast_cancer()
return data.data, data.target
def get_sparse():
rng = np.random.RandomState(199)
n = 5000
sparsity = 0.75
X, y = datasets.make_regression(n, random_state=rng)
X = np.array([[0.0 if rng.uniform(0, 1) < sparsity else x for x in x_row] for x_row in X])
from scipy import sparse
X = sparse.csr_matrix(X)
return X, y
def get_sparse_weights():
return get_weights_regression(1, 10)
def get_small_weights():
return get_weights_regression(1e-6, 1e-5)
def get_weights_regression(min_weight, max_weight):
rng = np.random.RandomState(199)
n = 10000
sparsity = 0.25
X, y = datasets.make_regression(n, random_state=rng)
X = np.array([[np.nan if rng.uniform(0, 1) < sparsity else x
for x in x_row] for x_row in X])
w = np.array([rng.uniform(min_weight, max_weight) for i in range(n)])
return X, y, w
def train_dataset(dataset, param_in, num_rounds=10, scale_features=False):
param = param_in.copy()
param["objective"] = dataset.objective
if dataset.objective == "multi:softmax":
param["num_class"] = int(np.max(dataset.y) + 1)
param["eval_metric"] = dataset.metric
if scale_features:
X = scale(dataset.X, with_mean=isinstance(dataset.X, np.ndarray))
else:
X = dataset.X
if dataset.use_external_memory:
np.savetxt('tmptmp_1234.csv', np.hstack((dataset.y.reshape(len(dataset.y), 1), X)),
delimiter=',')
dtrain = xgb.DMatrix('tmptmp_1234.csv?format=csv&label_column=0#tmptmp_',
weight=dataset.w)
else:
dtrain = xgb.DMatrix(X, dataset.y, weight=dataset.w)
print("Training on dataset: " + dataset.name, file=sys.stderr)
print("Using parameters: " + str(param), file=sys.stderr)
res = {}
bst = xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')],
evals_result=res, verbose_eval=False)
# Free the booster and dmatrix so we can delete temporary files
bst_copy = bst.copy()
del bst
del dtrain
# Cleanup temporary files
if dataset.use_external_memory:
for f in glob.glob("tmptmp_*"):
os.remove(f)
return {"dataset": dataset, "bst": bst_copy, "param": param.copy(),
"eval": res['train'][dataset.metric]}
def parameter_combinations(variable_param):
"""
Enumerate all possible combinations of parameters
"""
result = []
names = sorted(variable_param)
    combinations = it.product(*(variable_param[name] for name in names))
    for combination in combinations:
        param = {}
        for i, name in enumerate(names):
            param[name] = combination[i]
        result.append(param)
return result
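# Illustrative sketch (not part of the original module); the parameter grid is
# hypothetical:
#
#     grid = parameter_combinations({'max_depth': [2, 4], 'eta': [0.1, 0.3]})
#     # -> [{'eta': 0.1, 'max_depth': 2}, {'eta': 0.1, 'max_depth': 4},
#     #     {'eta': 0.3, 'max_depth': 2}, {'eta': 0.3, 'max_depth': 4}]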
def run_suite(param, num_rounds=10, select_datasets=None, scale_features=False):
"""
Run the given parameters on a range of datasets. Objective and eval metric will be automatically set
"""
datasets = [
Dataset("Boston", get_boston, "reg:squarederror", "rmse"),
Dataset("Digits", get_digits, "multi:softmax", "mlogloss"),
Dataset("Cancer", get_cancer, "binary:logistic", "logloss"),
Dataset("Sparse regression", get_sparse, "reg:squarederror", "rmse"),
Dataset("Sparse regression with weights", get_sparse_weights,
"reg:squarederror", "rmse", has_weights=True),
Dataset("Small weights regression", get_small_weights,
"reg:squarederror", "rmse", has_weights=True),
Dataset("Boston External Memory", get_boston,
"reg:squarederror", "rmse",
use_external_memory=True)
]
results = [
]
for d in datasets:
if select_datasets is None or d.name in select_datasets:
results.append(
train_dataset(d, param, num_rounds=num_rounds, scale_features=scale_features))
return results
def non_increasing(L, tolerance):
return all((y - x) < tolerance for x, y in zip(L, L[1:]))
def assert_results_non_increasing(results, tolerance=1e-5):
for r in results:
assert non_increasing(r['eval'], tolerance), r
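# Typical usage sketch built from the helpers above (not part of the original
# file); the parameter values are hypothetical:
#
#     params = {'tree_method': 'hist', 'max_depth': 4, 'eta': 0.3}
#     results = run_suite(params, num_rounds=10)
#     assert_results_non_increasing(results, tolerance=1e-3)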
|
the-stack_0_4795 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
|
the-stack_0_4796 | import base64
import uuid
from .base import DynamicField
from rest_framework.serializers import FileField, ImageField
from rest_framework import exceptions
from django.core.files.base import ContentFile
from django.utils import six
IMAGE_TYPES = {
'jpeg',
'jpg',
'png',
'gif',
'bmp',
'tiff',
'webp',
'ico',
'eps'
}
class DynamicFileFieldBase(
DynamicField
):
def __init__(self, **kwargs):
self.allow_remote = kwargs.pop('allow_remote', True)
self.allow_base64 = kwargs.pop('allow_base64', True)
super(DynamicFileFieldBase, self).__init__(**kwargs)
def get_extension(self, name):
if not name or '.' not in name:
return ''
return name.split('.')[-1].lower()
def to_internal_value_remote(self, name):
if not name:
self.fail('no_name')
field = self.model_field
storage = field.storage
if not storage.exists(name):
self.fail('invalid')
size = storage.size(name)
name_length = len(name)
if not self.allow_empty_file and not size:
self.fail('empty')
if self.max_length and name_length > self.max_length:
self.fail(
'max_length',
max_length=self.max_length,
length=name_length
)
if isinstance(self, ImageField):
ext = self.get_extension(name)
if ext not in IMAGE_TYPES:
return self.fail('invalid_image')
return name
def to_internal_value_base64(self, data):
header, data = data.split(';base64,')
try:
decoded = base64.b64decode(data)
except TypeError:
self.fail('invalid')
file_name = str(uuid.uuid4())[:12]
ext = header.split('/')[-1]
file_name += '.' + ext
data = ContentFile(decoded, name=file_name)
if isinstance(self, ImageField):
if ext not in IMAGE_TYPES:
return self.fail('invalid_image')
return super(
DynamicFileFieldBase,
self
).to_internal_value(data)
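    # Illustrative sketch (not part of the original module) of the kind of
    # payload to_internal_value_base64() expects, e.g. from a JSON request
    # body (the base64 content here is truncated and hypothetical):
    #
    #     "data:image/png;base64,iVBORw0KGgoAAA..."
    #
    # The header before ';base64,' supplies the extension ('png' above), and
    # the decoded bytes are wrapped in a ContentFile with a random name.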
def to_internal_value(self, data):
if isinstance(data, six.string_types):
if self.allow_base64 and 'data:' in data and ';base64,' in data:
return self.to_internal_value_base64(data)
elif self.allow_remote:
return self.to_internal_value_remote(data)
else:
raise exceptions.ValidationError()
else:
return super(DynamicFileFieldBase, self).to_internal_value(data)
class DynamicImageField(
DynamicFileFieldBase,
ImageField
):
pass
class DynamicFileField(
DynamicFileFieldBase,
FileField
):
pass
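# Illustrative sketch (not part of the original module): declaring one of these
# fields on a serializer. The serializer base class and field names below are
# assumptions.
#
#     class ProfileSerializer(DynamicModelSerializer):
#         avatar = DynamicImageField(allow_base64=True, allow_remote=True)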
|
the-stack_0_4797 | import logging
import tempfile
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import ignore_leftovers
from ocs_ci.ocs.ocp import wait_for_cluster_connectivity, OCP
from ocs_ci.ocs import constants, node, defaults
from ocs_ci.ocs.resources.pod import get_fio_rw_iops
from ocs_ci.ocs.resources.pvc import delete_pvcs
from tests import helpers
from ocs_ci.ocs.bucket_utils import s3_delete_object, s3_get_object, s3_put_object
from tests.manage.z_cluster.pvc_ops import create_pvcs
from ocs_ci.utility.utils import ceph_health_check, run_cmd, TimeoutSampler
from ocs_ci.utility import templating
from ocs_ci.ocs.cluster import CephCluster, CephClusterExternal
logger = logging.getLogger(__name__)
class Sanity:
"""
Class for cluster health and functional validations
"""
def __init__(self):
"""
Initializer for Sanity class - Init CephCluster() in order to
set the cluster status before starting the tests
"""
self.pvc_objs = list()
self.pod_objs = list()
self.obj_data = ""
self.ceph_cluster = CephCluster()
def health_check(self, cluster_check=True, tries=20):
"""
Perform Ceph and cluster health checks
"""
wait_for_cluster_connectivity(tries=400)
logger.info("Checking cluster and Ceph health")
node.wait_for_nodes_status(timeout=300)
ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'], tries=tries)
if cluster_check:
self.ceph_cluster.cluster_health_check(timeout=60)
def create_resources(self, pvc_factory, pod_factory, run_io=True):
"""
Sanity validation - Create resources (FS and RBD) and run IO
Args:
pvc_factory (function): A call to pvc_factory function
pod_factory (function): A call to pod_factory function
run_io (bool): True for run IO, False otherwise
"""
logger.info("Creating resources and running IO as a sanity functional validation")
for interface in [constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM]:
pvc_obj = pvc_factory(interface)
self.pvc_objs.append(pvc_obj)
self.pod_objs.append(pod_factory(pvc=pvc_obj, interface=interface))
if run_io:
for pod in self.pod_objs:
pod.run_io('fs', '1G', runtime=30)
for pod in self.pod_objs:
get_fio_rw_iops(pod)
self.create_obc()
self.verify_obc()
def create_obc(self):
"""
OBC creation for RGW and Nooba
"""
if config.ENV_DATA['platform'] in constants.ON_PREM_PLATFORMS:
obc_rgw = templating.load_yaml(
constants.RGW_OBC_YAML
)
obc_rgw_data_yaml = tempfile.NamedTemporaryFile(
mode='w+', prefix='obc_rgw_data', delete=False
)
templating.dump_data_to_temp_yaml(
obc_rgw, obc_rgw_data_yaml.name
)
logger.info("Creating OBC for rgw")
run_cmd(f"oc create -f {obc_rgw_data_yaml.name}", timeout=2400)
self.obc_rgw = obc_rgw['metadata']['name']
obc_nooba = templating.load_yaml(
constants.MCG_OBC_YAML
)
obc_mcg_data_yaml = tempfile.NamedTemporaryFile(
mode='w+', prefix='obc_mcg_data', delete=False
)
templating.dump_data_to_temp_yaml(
obc_nooba, obc_mcg_data_yaml.name
)
logger.info("create OBC for mcg")
run_cmd(f"oc create -f {obc_mcg_data_yaml.name}", timeout=2400)
self.obc_mcg = obc_nooba['metadata']['name']
def delete_obc(self):
"""
        Clean up OBC resources created above
"""
if config.ENV_DATA['platform'] in constants.ON_PREM_PLATFORMS:
logger.info(f"Deleting rgw obc {self.obc_rgw}")
obcrgw = OCP(
kind='ObjectBucketClaim',
resource_name=f'{self.obc_rgw}'
)
run_cmd(f"oc delete obc/{self.obc_rgw}")
obcrgw.wait_for_delete(
resource_name=f'{self.obc_rgw}',
timeout=300
)
logger.info(f"Deleting mcg obc {self.obc_mcg}")
obcmcg = OCP(kind='ObjectBucketClaim', resource_name=f'{self.obc_mcg}')
run_cmd(
f"oc delete obc/{self.obc_mcg} -n "
f"{defaults.ROOK_CLUSTER_NAMESPACE}"
)
obcmcg.wait_for_delete(resource_name=f'{self.obc_mcg}', timeout=300)
def verify_obc(self):
"""
OBC verification from external cluster perspective,
we will check 2 OBCs
"""
sample = TimeoutSampler(
300,
5,
self.ceph_cluster.noobaa_health_check
)
sample.wait_for_func_status(True)
def delete_resources(self):
"""
Sanity validation - Delete resources (FS and RBD)
"""
logger.info("Deleting resources as a sanity functional validation")
self.delete_obc()
for pod_obj in self.pod_objs:
pod_obj.delete()
for pod_obj in self.pod_objs:
pod_obj.ocp.wait_for_delete(pod_obj.name)
for pvc_obj in self.pvc_objs:
pvc_obj.delete()
for pvc_obj in self.pvc_objs:
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
@ignore_leftovers
def create_pvc_delete(self, multi_pvc_factory, project=None):
"""
Creates and deletes all types of PVCs
"""
# Create rbd pvcs
pvc_objs_rbd = create_pvcs(
multi_pvc_factory=multi_pvc_factory, interface='CephBlockPool',
project=project, status="", storageclass=None
)
# Create cephfs pvcs
pvc_objs_cephfs = create_pvcs(
multi_pvc_factory=multi_pvc_factory, interface='CephFileSystem',
project=project, status="", storageclass=None
)
all_pvc_to_delete = pvc_objs_rbd + pvc_objs_cephfs
# Check pvc status
for pvc_obj in all_pvc_to_delete:
helpers.wait_for_resource_state(
resource=pvc_obj, state=constants.STATUS_BOUND, timeout=300
)
# Start deleting PVC
delete_pvcs(all_pvc_to_delete)
# Check PVCs are deleted
for pvc_obj in all_pvc_to_delete:
pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
logger.info("All PVCs are deleted as expected")
def obc_put_obj_create_delete(self, mcg_obj, bucket_factory):
"""
Creates bucket then writes, reads and deletes objects
"""
bucket_name = bucket_factory(amount=1, interface='OC')[0].name
self.obj_data = "A string data"
for i in range(0, 30):
key = 'Object-key-' + f"{i}"
logger.info(f"Write, read and delete object with key: {key}")
assert s3_put_object(mcg_obj, bucket_name, key, self.obj_data), f"Failed: Put object, {key}"
assert s3_get_object(mcg_obj, bucket_name, key), f"Failed: Get object, {key}"
assert s3_delete_object(mcg_obj, bucket_name, key), f"Failed: Delete object, {key}"
class SanityExternalCluster(Sanity):
"""
Helpers for health check and functional validation
in External mode
"""
def __init__(self):
"""
Initializer for Sanity class - Init CephCluster() in order to
set the cluster status before starting the tests
"""
self.pvc_objs = list()
self.pod_objs = list()
self.ceph_cluster = CephClusterExternal()
|
the-stack_0_4802 | import logging
import astropy.units as u
from astropy.wcs import (WCS, WCSSUB_CELESTIAL, WCSSUB_CUBEFACE,
WCSSUB_LATITUDE, WCSSUB_LONGITUDE, WCSSUB_SPECTRAL,
WCSSUB_STOKES, InvalidSubimageSpecificationError)
# Use this once in specutils
from ...utils.wcs_utils import (convert_spectral_axis,
determine_ctype_from_vconv)
from ..wcs_adapter import WCSAdapter, WCSAxes
__all__ = ['FITSWCSAdapter']
class FITSWCSAdapter(WCSAdapter):
"""
Adapter class that adds support for FITSWCS objects.
In the wild, fits WCS headers are often non-standard compliant, but
can be interpreted with little ambiguity (e.g. the CTYPE of the
wavelength axis is called "Wavelength" instead of the standard fits
"WAVE"). In some common cases, this class will thus read files that
are not fully compliant. In these cases, it prints a warning message.
"""
wrapped_class = WCS
axes = None
substitute_spec_axis_names = ['linear', 'wavelength']
def __init__(self, wcs):
super(FITSWCSAdapter, self).__init__(wcs)
self._spec_axis = None
# Store a reference to all axes information within the wcs object
self.axes = WCSAxes(
longitude=self.wcs.sub([WCSSUB_LONGITUDE]),
latitude=self.wcs.sub([WCSSUB_LATITUDE]),
cubeface=self.wcs.sub([WCSSUB_CUBEFACE]),
spectral=self.wcs.sub([WCSSUB_SPECTRAL]),
stokes=self.wcs.sub([WCSSUB_STOKES]),
celestial=self.wcs.sub([WCSSUB_CELESTIAL])
)
# TODO: make this more efficient. Check to see whether the spectral
# axis was actually parsed
if self.axes.spectral.naxis == 0:
self.axes = self.axes._replace(spectral=self.wcs.sub([self.spec_axis + 1]))
def __getitem__(self, item):
"""Pass slicing information to the internal `FITSWCS` object."""
return self.wcs[item]
def __deepcopy__(self, *args, **kwargs):
"""
Ensure deepcopy is passed through to the underlying fits wcs object.
Doing so allows for proper memoization handling in the astropy fits
machinery.
"""
return self.__class__(self.wcs.__deepcopy__(*args, **kwargs))
def world_to_pixel(self, world_array):
"""
Method for performing the world to pixel transformations.
"""
with u.set_enabled_equivalencies(u.spectral()):
world_array = u.Quantity(world_array, unit=self.spectral_axis_unit)
return self.axes.spectral.all_world2pix(world_array.value, 0)[0]
def pixel_to_world(self, pixel_array):
"""
Method for performing the pixel to world transformations.
"""
return u.Quantity(self.axes.spectral.all_pix2world(pixel_array, 0)[0],
self.spectral_axis_unit)
@property
def spec_axis(self):
"""
Try and parse the spectral axis of the fits wcs object.
"""
self._spec_axis = self.wcs.wcs.spec
if (self._spec_axis < 0) and (self._wcs.wcs.spec) < 0:
ctypelist = [c.lower() for c in self.wcs.wcs.ctype]
for n in self.substitute_spec_axis_names:
if n in ctypelist:
self._spec_axis = ctypelist.index(n)
logging.warning("WCS has a non-standard spectral axis, 'ctype's might be incorrect. Assuming the axis {} labeled '{}' is spectral and proceeding.".format(self._spec_axis, n))
break
else:
raise InvalidSubimageSpecificationError(
"Cannot find a spectral axis in the provided WCS."
"Are your 'ctype's correct?")
return self._spec_axis
@property
def spectral_axis_unit(self):
"""
Returns the unit of the spectral axis.
"""
return self._wcs.wcs.cunit[self.spec_axis]
@property
def rest_frequency(self):
"""
Returns the rest frequency defined in the WCS.
"""
return self.wcs.wcs.restfrq
@property
def rest_wavelength(self):
"""
Returns the rest wavelength defined in the WCS.
"""
return self.wcs.wcs.restwav
def bin_edges(self):
# the WCS doesn't know about its own pixel array
edge_indices = list(self.axes.spectral.pixel_indices - 0.5) + \
[self.axes.spectral.pixel_indices[-1] + 0.5]
        return self.pixel_to_world(edge_indices)
def with_spectral_unit(self, unit, rest_value=None, velocity_convention=None):
# Shorter versions to keep lines under 80
ctype_from_vconv = determine_ctype_from_vconv
out_ctype = ctype_from_vconv(self._wcs.wcs.ctype[self.spec_axis],
unit,
velocity_convention=velocity_convention)
new_wcs = convert_spectral_axis(self._wcs, unit, out_ctype,
rest_value=rest_value)
new_wcs.wcs.set()
return new_wcs
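# Minimal usage sketch (not part of the original module); the FITS WCS below is
# a hypothetical one-dimensional spectral WCS:
#
#     from astropy.wcs import WCS
#     fits_wcs = WCS(naxis=1)
#     fits_wcs.wcs.ctype = ['WAVE']
#     fits_wcs.wcs.cunit = ['Angstrom']
#     adapter = FITSWCSAdapter(fits_wcs)
#     adapter.pixel_to_world([0, 1, 2])  # -> Quantity in Angstrom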
|
the-stack_0_4803 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/check_images.py
#
# TODO 0: Add your information below for Programmer & Date Created.
# PROGRAMMER: Luis Candanedo
# DATE CREATED: 5/24/2020
# REVISED DATE:
# PURPOSE: Classifies pet images using a pretrained CNN model, compares these
# classifications to the true identity of the pets in the images, and
# summarizes how well the CNN performed on the image classification task.
# Note that the true identity of the pet (or object) in the image is
# indicated by the filename of the image. Therefore, your program must
# first extract the pet image label from the filename before
# classifying the images using the pretrained CNN model. With this
# program we will be comparing the performance of 3 different CNN model
# architectures to determine which provides the 'best' classification.
#
# Use argparse Expected Call with <> indicating expected user input:
# python check_images.py --dir <directory with images> --arch <model>
# --dogfile <file that contains dognames>
# Example call:
# python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##
# Imports python modules
from time import time, sleep
# Imports print functions that check the lab
from print_functions_for_lab_checks import *
# Imports functions created for this program
from get_input_args import get_input_args
from get_pet_labels import get_pet_labels
from classify_images import classify_images
from adjust_results4_isadog import adjust_results4_isadog
from calculates_results_stats import calculates_results_stats
from print_results import print_results
# Main program function defined below
def main():
# TODO 0: Measures total program runtime by collecting start time
start_time = time()
#sleep(5)
# TODO 1: Define get_input_args function within the file get_input_args.py
# This function retrieves 3 Command Line Arugments from user as input from
# the user running the program from a terminal window. This function returns
# the collection of these command line arguments from the function call as
# the variable in_arg
in_arg = get_input_args()
# Function that checks command line arguments using in_arg
check_command_line_arguments(in_arg)
# TODO 2: Define get_pet_labels function within the file get_pet_labels.py
# Once the get_pet_labels function has been defined replace 'None'
# in the function call with in_arg.dir Once you have done the replacements
# your function call should look like this:
# get_pet_labels(in_arg.dir)
# This function creates the results dictionary that contains the results,
# this dictionary is returned from the function call as the variable results
results = get_pet_labels(in_arg.dir)
#print(results)
# Function that checks Pet Images in the results Dictionary using results
check_creating_pet_image_labels(results)
# TODO 3: Define classify_images function within the file classiy_images.py
# Once the classify_images function has been defined replace first 'None'
# in the function call with in_arg.dir and replace the last 'None' in the
# function call with in_arg.arch Once you have done the replacements your
# function call should look like this:
# classify_images(in_arg.dir, results, in_arg.arch)
# Creates Classifier Labels with classifier function, Compares Labels,
# and adds these results to the results dictionary - results
classify_images(in_arg.dir, results, in_arg.arch)
# Function that checks Results Dictionary using results
check_classifying_images(results)
# TODO 4: Define adjust_results4_isadog function within the file adjust_results4_isadog.py
# Once the adjust_results4_isadog function has been defined replace 'None'
# in the function call with in_arg.dogfile Once you have done the
# replacements your function call should look like this:
# adjust_results4_isadog(results, in_arg.dogfile)
# Adjusts the results dictionary to determine if classifier correctly
# classified images as 'a dog' or 'not a dog'. This demonstrates if
# model can correctly classify dog images as dogs (regardless of breed)
adjust_results4_isadog(results, in_arg.dogfile)
# Function that checks Results Dictionary for is-a-dog adjustment using results
check_classifying_labels_as_dogs(results)
# TODO 5: Define calculates_results_stats function within the file calculates_results_stats.py
# This function creates the results statistics dictionary that contains a
# summary of the results statistics (this includes counts & percentages). This
# dictionary is returned from the function call as the variable results_stats
# Calculates results of run and puts statistics in the Results Statistics
# Dictionary - called results_stats
results_stats = calculates_results_stats(results)
# Function that checks Results Statistics Dictionary using results_stats
check_calculating_results(results, results_stats)
# TODO 6: Define print_results function within the file print_results.py
# Once the print_results function has been defined replace 'None'
# in the function call with in_arg.arch Once you have done the
# replacements your function call should look like this:
# print_results(results, results_stats, in_arg.arch, True, True)
# Prints summary results, incorrect classifications of dogs (if requested)
# and incorrectly classified breeds (if requested)
print_results(results, results_stats, in_arg.arch, True, True)
# TODO 0: Measure total program runtime by collecting end time
end_time = time()
# TODO 0: Computes overall runtime in seconds & prints it in hh:mm:ss format
    tot_time = end_time - start_time  # calculate difference between end time and start time
print("\n** Total Elapsed Runtime:",
str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
+str(int((tot_time%3600)%60)) )
# Call to main function to run the program
if __name__ == "__main__":
main()
|
the-stack_0_4804 | import asyncio
import warnings
import pytest
from distributed import Worker, WorkerPlugin
from distributed.utils_test import async_wait_for, gen_cluster, inc
class MyPlugin(WorkerPlugin):
name = "MyPlugin"
def __init__(self, data, expected_notifications=None):
self.data = data
self.expected_notifications = expected_notifications
def setup(self, worker):
assert isinstance(worker, Worker)
self.worker = worker
self.worker._my_plugin_status = "setup"
self.worker._my_plugin_data = self.data
self.observed_notifications = []
def teardown(self, worker):
self.worker._my_plugin_status = "teardown"
if self.expected_notifications is not None:
assert len(self.observed_notifications) == len(self.expected_notifications)
for expected, real in zip(
self.expected_notifications, self.observed_notifications
):
assert expected == real
def transition(self, key, start, finish, **kwargs):
self.observed_notifications.append(
{"key": key, "start": start, "finish": finish}
)
@gen_cluster(client=True, nthreads=[])
async def test_create_with_client(c, s):
await c.register_worker_plugin(MyPlugin(123))
worker = await Worker(s.address, loop=s.loop)
assert worker._my_plugin_status == "setup"
assert worker._my_plugin_data == 123
await worker.close()
assert worker._my_plugin_status == "teardown"
@gen_cluster(client=True, nthreads=[])
async def test_remove_with_client(c, s):
await c.register_worker_plugin(MyPlugin(123), name="foo")
await c.register_worker_plugin(MyPlugin(546), name="bar")
worker = await Worker(s.address, loop=s.loop)
# remove the 'foo' plugin
await c.unregister_worker_plugin("foo")
assert worker._my_plugin_status == "teardown"
# check that on the scheduler registered worker plugins we only have 'bar'
assert len(s.worker_plugins) == 1
assert "bar" in s.worker_plugins
# check on the worker plugins that we only have 'bar'
assert len(worker.plugins) == 1
assert "bar" in worker.plugins
# let's remove 'bar' and we should have none worker plugins
await c.unregister_worker_plugin("bar")
assert worker._my_plugin_status == "teardown"
assert not s.worker_plugins
assert not worker.plugins
@gen_cluster(client=True, nthreads=[])
async def test_remove_with_client_raises(c, s):
await c.register_worker_plugin(MyPlugin(123), name="foo")
worker = await Worker(s.address, loop=s.loop)
with pytest.raises(ValueError, match="bar"):
await c.unregister_worker_plugin("bar")
@gen_cluster(client=True, worker_kwargs={"plugins": [MyPlugin(5)]})
async def test_create_on_construction(c, s, a, b):
assert len(a.plugins) == len(b.plugins) == 1
assert a._my_plugin_status == "setup"
assert a._my_plugin_data == 5
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_normal_task_transitions_called(c, s, w):
expected_notifications = [
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "ready"},
{"key": "task", "start": "ready", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "memory"},
{"key": "task", "start": "memory", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
await c.submit(lambda x: x, 1, key="task")
await async_wait_for(lambda: not w.tasks, timeout=10)
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_failing_task_transitions_called(c, s, w):
def failing(x):
raise Exception()
expected_notifications = [
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "ready"},
{"key": "task", "start": "ready", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "error"},
{"key": "task", "start": "error", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
with pytest.raises(Exception):
await c.submit(failing, 1, key="task")
@gen_cluster(
nthreads=[("127.0.0.1", 1)], client=True, worker_kwargs={"resources": {"X": 1}}
)
async def test_superseding_task_transitions_called(c, s, w):
expected_notifications = [
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "constrained"},
{"key": "task", "start": "constrained", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "memory"},
{"key": "task", "start": "memory", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
await c.submit(lambda x: x, 1, key="task", resources={"X": 1})
await async_wait_for(lambda: not w.tasks, timeout=10)
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dependent_tasks(c, s, w):
dsk = {"dep": 1, "task": (inc, "dep")}
expected_notifications = [
{"key": "dep", "start": "released", "finish": "waiting"},
{"key": "dep", "start": "waiting", "finish": "ready"},
{"key": "dep", "start": "ready", "finish": "executing"},
{"key": "dep", "start": "executing", "finish": "memory"},
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "ready"},
{"key": "task", "start": "ready", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "memory"},
{"key": "dep", "start": "memory", "finish": "released"},
{"key": "task", "start": "memory", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
{"key": "dep", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
await c.get(dsk, "task", sync=False)
await async_wait_for(lambda: not w.tasks, timeout=10)
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_empty_plugin(c, s, w):
class EmptyPlugin:
pass
await c.register_worker_plugin(EmptyPlugin())
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_default_name(c, s, w):
class MyCustomPlugin(WorkerPlugin):
pass
await c.register_worker_plugin(MyCustomPlugin())
assert len(w.plugins) == 1
assert next(iter(w.plugins)).startswith("MyCustomPlugin-")
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_assert_no_warning_no_overload(c, s, a):
"""Assert we do not receive a deprecation warning if we do not overload any
methods
"""
class Dummy(WorkerPlugin):
pass
with warnings.catch_warnings(record=True) as record:
await c.register_worker_plugin(Dummy())
assert await c.submit(inc, 1, key="x") == 2
while "x" in a.tasks:
await asyncio.sleep(0.01)
assert not record
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_WorkerPlugin_overwrite(c, s, w):
class MyCustomPlugin(WorkerPlugin):
name = "custom"
def setup(self, worker):
self.worker = worker
self.worker.foo = 0
def transition(self, *args, **kwargs):
self.worker.foo = 123
def teardown(self, worker):
del self.worker.foo
await c.register_worker_plugin(MyCustomPlugin())
assert w.foo == 0
await c.submit(inc, 0)
assert w.foo == 123
while s.tasks or w.tasks:
await asyncio.sleep(0.01)
class MyCustomPlugin(WorkerPlugin):
name = "custom"
def setup(self, worker):
self.worker = worker
self.worker.bar = 0
def transition(self, *args, **kwargs):
self.worker.bar = 456
def teardown(self, worker):
del self.worker.bar
await c.register_worker_plugin(MyCustomPlugin())
assert not hasattr(w, "foo")
assert w.bar == 0
await c.submit(inc, 0)
assert w.bar == 456
|
the-stack_0_4806 | import unittest
import paddle.v2.fluid.core as core
from paddle.v2.fluid.executor import Executor
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.backward import append_backward_ops
from paddle.v2.fluid.framework import g_main_program
import numpy
class TestShrinkRNNMemory(unittest.TestCase):
def test_shrink_rnn_memory(self):
x = layers.data('x', shape=[100], data_type='float32')
x.stop_gradient = False
table = layers.lod_rank_table(x=x)
i = layers.zeros(dtype='int64', shape=[1])
mem1 = layers.shrink_memory(x=x, i=i, table=table)
i = layers.increment(x=i)
i.stop_gradient = True
mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
i = layers.increment(x=i)
i.stop_gradient = True
mem3 = layers.shrink_memory(x=mem2, i=i, table=table)
cpu = core.CPUPlace()
tensor = core.LoDTensor()
tensor.set_lod([[0, 2, 5, 6]])
tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
tensor.set(tensor_np, cpu)
exe = Executor(cpu)
outs = map(numpy.array,
exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3]))
self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))
mem3_mean = layers.mean(x=mem3)
append_backward_ops(loss=mem3_mean)
x_grad = map(numpy.array,
exe.run(feed={'x': tensor},
fetch_list=[
g_main_program.global_block().var('x@GRAD')
]))[0]
self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_4807 | #!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2021 All Rights Reserved.
------------------------------------------------------
File Name : lr_train_and_predict.py
Author : Qizhi Zhang
Email: [email protected]
Create Time : 2021/5/21 上午10:13
Description : description what the main function of this file
"""
from stensorflow.engine.start_server import start_local_server, start_client
import tensorflow as tf
from stensorflow.global_var import StfConfig
from stensorflow.basic.basic_class.private import PrivateTensor
from stensorflow.ml.logistic_regression import LogisticRegression
import numpy as np
import random
import time
random.seed(0)
"""
An example of training an LR model on a dataset with 291 features and predicting
with this model.
The features are held by party L; the label is held by party R.
"""
#start_local_server(config_file="../conf/config_ym.json")
start_local_server(config_file="../conf/config_epsilon.json")
#start_client(config_file="../conf/config_ym.json", job_name="workerR")
matchColNum = 0
featureNumX = 3000
featureNumY = 0
record_num = 10
epoch = 100
batch_size = 2
learning_rate = 0.01
clip_value = 5.0
train_batch_num = epoch * record_num // batch_size + 1
pred_record_num = 10
pred_batch_num = pred_record_num // batch_size + 1
# -------------define a private tensor x_train of party L and a private tensor y_train on the party R
x_train = PrivateTensor(owner='L')
y_train = PrivateTensor(owner='R')
format_x = [["a"]] * matchColNum + [[0.2]] * featureNumX
format_y = [["a"]] * matchColNum + [[0.3]] * featureNumY + [[1.0]]
# ----------------- load data from files -------------------
x_train.load_from_file(path=StfConfig.train_file_onL,
record_defaults=format_x, batch_size=batch_size, repeat=epoch + 2, skip_col_num=matchColNum,
clip_value=clip_value, skip_row_num=0)
y_train.load_from_file(path=StfConfig.train_file_onR,
record_defaults=format_y, batch_size=batch_size, repeat=epoch + 2, skip_col_num=matchColNum,
clip_value=clip_value, skip_row_num=0)
print("StfConfig.parties=", StfConfig.parties)
# ----------- build a LR model ---------------
model = LogisticRegression(num_features=featureNumX + featureNumY, learning_rate=learning_rate)
# -------------start a tensorflow session, and initialize all variables -----------------
sess = tf.compat.v1.Session(StfConfig.target)
init_op = tf.compat.v1.global_variables_initializer()
sess.run(init_op)
# -------------train the model ------------------------
start_time = time.time()
model.fit(sess=sess, x=x_train, y=y_train, num_batches=train_batch_num)
print("train time=", time.time()-start_time)
save_op = model.save(model_file_path="./")
sess.run(save_op)
# ------------define the private tensors for test dataset ----------------
x_test = PrivateTensor(owner='L')
y_test = PrivateTensor(owner='R')
x_test.load_from_file(path=StfConfig.pred_file_onL,
record_defaults=format_x, batch_size=batch_size, repeat=2, skip_col_num=matchColNum,
clip_value=clip_value, skip_row_num=0)
id = y_test.load_from_file_withid(path=StfConfig.pred_file_onR,
record_defaults=format_y, batch_size=batch_size, repeat=2,
id_col_num=matchColNum, clip_value=clip_value, skip_row_num=0)
# --------------predict --------------
model.predict(id, x_test, pred_batch_num, sess)
sess.close()
|
the-stack_0_4808 | # https://math.stackexchange.com/questions/4231713/has-anyone-ever-attempted-to-find-all-splits-of-a-rectangle-into-smaller-rectang
# from random import randint
from numpy import random
# from pymclevel.box import BoundingBox
def make2dList(nRows, nCols):
newList = []
for row in xrange(nRows):
# give each new row an empty list
newList.append([])
for col in xrange(nCols):
# initialize with 0s
newList[row].append(0)
return newList
class RectangleSplitter:
def __init__(self, width, length):
self._groundMatrix = make2dList(width, length)
self.newRectMinWidth = 0 # min(width, 3)
self.newRectMinLength = 0 # min(length, 3)
# docs: https://numpy.org/doc/stable/reference/random/legacy.html#numpy.random.RandomState
# distribution graphs: https://statdist.com/
self.randomState = random.RandomState()
# def __init__(self, selectionBox) -> None:
# selectionBox = BoundingBox(selectionBox) # DEBUG: to get the class shown correctly in IDE
# self._groundMatrix = make2dList(selectionBox.width, selectionBox.length)
def Partition(self, partitionCount):
"""
example groundMatrix:
y0, y1, y2
x0 [0 1 2]
x1 [3 4 5]
x2 [6 7 8]
x3 [ 9 10 11]
Algorithm:
for n in 1 .. partitionCount-1:
- pick the left or the top edge at random
- count the distinct rectangles on that edge and the indices where they start
- pick at random how many of those rectangles the new rectangle spans (full length)
- push the border inward by a random percentage
- fill the new area with the partition index n
"""
for n in xrange(1, partitionCount):
self.CalculatePartition(n)
return self._groundMatrix
def CalculatePartition(self, n):
print("partition: " + str(n))
# left = self.randomState.randint(0, 2)
left = n % 2
leftRectStartList = self.GetListOfLeftBorderRectangleStarts()
leftRectBorderCount = len(leftRectStartList)
topRectStartList = self.GetListOfTopBorderRectangleStarts()
topRectBorderCount = len(topRectStartList)
# add the end of base rectangle as last elements:
topRectStartList.append(len(self._groundMatrix[0]))
leftRectStartList.append(len(self._groundMatrix))
# print("left List: " + str(leftRectBorderCount))
# print(leftRectStartList)
# print("top List: " + str(topRectBorderCount))
# print(topRectStartList)
if left == 1:
# push from left
rectIndex = self.GetRandomPushy(0, topRectBorderCount)
min_width = 0 # max(self.newRectMinWidth, leftRectStartList[0]-1)
newRectMaxX = self.GetRandomNormal(min_width, leftRectStartList[0] - 1)
newRectMaxY = topRectStartList[rectIndex]-1 # next rect start is the max
# print("push from left to x/y: " + str(newRectMaxX) + "/" + str(newRectMaxY))
elif left == 0:
# push from top
rectIndex = self.GetRandomPushy(0, leftRectBorderCount)
newRectMaxX = leftRectStartList[rectIndex]-1 # next rect start is the max
min_width = 0 # max(self.newRectMinWidth, leftRectStartList[0] - 1)
newRectMaxY = self.GetRandomNormal(min_width, topRectStartList[0] - 1)
# print("push from top to x/y: " + str(newRectMaxX) + "/" + str(newRectMaxY))
self.FillNextPartition(n, newRectMaxX, newRectMaxY)
# print(self._groundMatrix)
def FillNextPartition(self, partitionId, maxX, maxY):
for x in xrange(0, maxX + 1):
for z in xrange(0, maxY + 1):
self._groundMatrix[x][z] = partitionId
def GetListOfTopBorderRectangleStarts(self):
count = []
lastRectangleId = -1
for yi, y in enumerate(self._groundMatrix[0]):
if y != lastRectangleId:
count.append(yi)
lastRectangleId = y
count.pop(0) # remove first change
return count
def GetListOfLeftBorderRectangleStarts(self):
count = []
lastRectangleId = -1
for yi, y in enumerate(self._groundMatrix):
if y[0] != lastRectangleId:
count.append(yi)
lastRectangleId = y[0]
count.pop(0) # remove first change
return count
def GetRandomPushy(self, start, end):
if end <= start:
return start
value = self.randomState.beta(4, 2)
# value = self.randomState.normal(0.5, 0.1)
# value = self.randomState.beta(1, 1) # uniform
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
value = int(round(start + (value / float(1 / float(end - start)))))
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
return value
def GetRandomNormal(self, start, end):
if end <= start:
return start
# value = self.randomState.beta(2, 4)
value = self.randomState.normal(0.5, 0.1)
# value = self.randomState.beta(1, 1) # uniform
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
value = int(round(start + (value / float(1 / float(end - start)))))
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
return value
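# A minimal usage sketch (an illustrative assumption, not part of the original
# module): partition a 10x10 ground matrix into 5 rectangles and print each row,
# so the resulting split can be inspected by eye.
if __name__ == "__main__":
    splitter = RectangleSplitter(10, 10)
    ground_matrix = splitter.Partition(5)
    for row in ground_matrix:
        print(row)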
|
the-stack_0_4811 | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
prefix = LaunchConfiguration('prefix', default='')
hw_ns = LaunchConfiguration('hw_ns', default='xarm')
limited = LaunchConfiguration('limited', default=False)
effort_control = LaunchConfiguration('effort_control', default=False)
velocity_control = LaunchConfiguration('velocity_control', default=False)
add_gripper = LaunchConfiguration('add_gripper', default=False)
add_vacuum_gripper = LaunchConfiguration('add_vacuum_gripper', default=False)
add_other_geometry = LaunchConfiguration('add_other_geometry', default=False)
geometry_type = LaunchConfiguration('geometry_type', default='box')
geometry_mass = LaunchConfiguration('geometry_mass', default=0.1)
geometry_height = LaunchConfiguration('geometry_height', default=0.1)
geometry_radius = LaunchConfiguration('geometry_radius', default=0.1)
geometry_length = LaunchConfiguration('geometry_length', default=0.1)
geometry_width = LaunchConfiguration('geometry_width', default=0.1)
geometry_mesh_filename = LaunchConfiguration('geometry_mesh_filename', default='')
geometry_mesh_origin_xyz = LaunchConfiguration('geometry_mesh_origin_xyz', default='"0 0 0"')
geometry_mesh_origin_rpy = LaunchConfiguration('geometry_mesh_origin_rpy', default='"0 0 0"')
geometry_mesh_tcp_xyz = LaunchConfiguration('geometry_mesh_tcp_xyz', default='"0 0 0"')
geometry_mesh_tcp_rpy = LaunchConfiguration('geometry_mesh_tcp_rpy', default='"0 0 0"')
# xarm moveit gazebo launch
# xarm_moveit_config/launch/_xarm_moveit_gazebo.launch.py
xarm_moveit_gazebo_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_moveit_config'), 'launch', '_xarm_moveit_gazebo.launch.py'])),
launch_arguments={
'prefix': prefix,
'hw_ns': hw_ns,
'limited': limited,
'effort_control': effort_control,
'velocity_control': velocity_control,
'add_gripper': add_gripper,
'add_vacuum_gripper': add_vacuum_gripper,
'dof': '7',
'no_gui_ctrl': 'false',
'add_other_geometry': add_other_geometry,
'geometry_type': geometry_type,
'geometry_mass': geometry_mass,
'geometry_height': geometry_height,
'geometry_radius': geometry_radius,
'geometry_length': geometry_length,
'geometry_width': geometry_width,
'geometry_mesh_filename': geometry_mesh_filename,
'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
}.items(),
)
return LaunchDescription([
xarm_moveit_gazebo_launch
])
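# A hedged usage note (not taken from the package documentation): a launch
# description like this one is normally started with the standard ros2 CLI,
# passing arguments that override the LaunchConfiguration defaults declared
# above. The launch-file name below is a placeholder assumption:
#   ros2 launch xarm_moveit_config <this_file>.launch.py add_gripper:=true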
|
the-stack_0_4813 | """
Holds classes and utility methods related to build graph
"""
import copy
import logging
import os
import threading
from pathlib import Path
from typing import Sequence, Tuple, List, Any, Optional, Dict, cast, NamedTuple
from copy import deepcopy
from uuid import uuid4
import tomlkit
from samcli.lib.build.exceptions import InvalidBuildGraphException
from samcli.lib.providers.provider import Function, LayerVersion
from samcli.lib.samlib.resource_metadata_normalizer import (
SAM_RESOURCE_ID_KEY,
SAM_IS_NORMALIZED,
)
from samcli.lib.utils.packagetype import ZIP
from samcli.lib.utils.architecture import X86_64
LOG = logging.getLogger(__name__)
DEFAULT_BUILD_GRAPH_FILE_NAME = "build.toml"
DEFAULT_DEPENDENCIES_DIR = os.path.join(".aws-sam", "deps")
# filed names for the toml table
PACKAGETYPE_FIELD = "packagetype"
CODE_URI_FIELD = "codeuri"
RUNTIME_FIELD = "runtime"
METADATA_FIELD = "metadata"
FUNCTIONS_FIELD = "functions"
SOURCE_HASH_FIELD = "source_hash"
MANIFEST_HASH_FIELD = "manifest_hash"
ENV_VARS_FIELD = "env_vars"
LAYER_NAME_FIELD = "layer_name"
BUILD_METHOD_FIELD = "build_method"
COMPATIBLE_RUNTIMES_FIELD = "compatible_runtimes"
LAYER_FIELD = "layer"
ARCHITECTURE_FIELD = "architecture"
HANDLER_FIELD = "handler"
def _function_build_definition_to_toml_table(
function_build_definition: "FunctionBuildDefinition",
) -> tomlkit.api.Table:
"""
Converts given function_build_definition into toml table representation
Parameters
----------
function_build_definition: FunctionBuildDefinition
FunctionBuildDefinition which will be converted into toml table
Returns
-------
tomlkit.api.Table
toml table of FunctionBuildDefinition
"""
toml_table = tomlkit.table()
if function_build_definition.packagetype == ZIP:
toml_table[CODE_URI_FIELD] = function_build_definition.codeuri
toml_table[RUNTIME_FIELD] = function_build_definition.runtime
toml_table[ARCHITECTURE_FIELD] = function_build_definition.architecture
toml_table[HANDLER_FIELD] = function_build_definition.handler
if function_build_definition.source_hash:
toml_table[SOURCE_HASH_FIELD] = function_build_definition.source_hash
toml_table[MANIFEST_HASH_FIELD] = function_build_definition.manifest_hash
toml_table[PACKAGETYPE_FIELD] = function_build_definition.packagetype
toml_table[FUNCTIONS_FIELD] = [f.full_path for f in function_build_definition.functions]
if function_build_definition.metadata:
toml_table[METADATA_FIELD] = function_build_definition.metadata
if function_build_definition.env_vars:
toml_table[ENV_VARS_FIELD] = function_build_definition.env_vars
return toml_table
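# For illustration only (the values are assumed, not taken from a real project),
# the table produced above for a ZIP-packaged function serializes roughly as:
#
#   codeuri = "HelloWorld/"
#   runtime = "python3.9"
#   architecture = "x86_64"
#   handler = "app.lambda_handler"
#   manifest_hash = ""
#   packagetype = "Zip"
#   functions = ["HelloWorldFunction"]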
def _toml_table_to_function_build_definition(uuid: str, toml_table: tomlkit.api.Table) -> "FunctionBuildDefinition":
"""
Converts given toml table into FunctionBuildDefinition instance
Parameters
----------
uuid: str
key of the function toml_table instance
toml_table: tomlkit.api.Table
function build definition as toml table
Returns
-------
FunctionBuildDefinition
FunctionBuildDefinition of given toml table
"""
function_build_definition = FunctionBuildDefinition(
toml_table.get(RUNTIME_FIELD),
toml_table.get(CODE_URI_FIELD),
toml_table.get(PACKAGETYPE_FIELD, ZIP),
toml_table.get(ARCHITECTURE_FIELD, X86_64),
dict(toml_table.get(METADATA_FIELD, {})),
toml_table.get(HANDLER_FIELD, ""),
toml_table.get(SOURCE_HASH_FIELD, ""),
toml_table.get(MANIFEST_HASH_FIELD, ""),
dict(toml_table.get(ENV_VARS_FIELD, {})),
)
function_build_definition.uuid = uuid
return function_build_definition
def _layer_build_definition_to_toml_table(layer_build_definition: "LayerBuildDefinition") -> tomlkit.api.Table:
"""
Converts given layer_build_definition into toml table representation
Parameters
----------
layer_build_definition: LayerBuildDefinition
LayerBuildDefinition which will be converted into toml table
Returns
-------
tomlkit.api.Table
toml table of LayerBuildDefinition
"""
toml_table = tomlkit.table()
toml_table[LAYER_NAME_FIELD] = layer_build_definition.full_path
toml_table[CODE_URI_FIELD] = layer_build_definition.codeuri
toml_table[BUILD_METHOD_FIELD] = layer_build_definition.build_method
toml_table[COMPATIBLE_RUNTIMES_FIELD] = layer_build_definition.compatible_runtimes
toml_table[ARCHITECTURE_FIELD] = layer_build_definition.architecture
if layer_build_definition.source_hash:
toml_table[SOURCE_HASH_FIELD] = layer_build_definition.source_hash
toml_table[MANIFEST_HASH_FIELD] = layer_build_definition.manifest_hash
if layer_build_definition.env_vars:
toml_table[ENV_VARS_FIELD] = layer_build_definition.env_vars
toml_table[LAYER_FIELD] = layer_build_definition.layer.full_path
return toml_table
def _toml_table_to_layer_build_definition(uuid: str, toml_table: tomlkit.api.Table) -> "LayerBuildDefinition":
"""
Converts given toml table into LayerBuildDefinition instance
Parameters
----------
uuid: str
key of the toml_table instance
toml_table: tomlkit.api.Table
layer build definition as toml table
Returns
-------
LayerBuildDefinition
LayerBuildDefinition of given toml table
"""
layer_build_definition = LayerBuildDefinition(
toml_table.get(LAYER_NAME_FIELD),
toml_table.get(CODE_URI_FIELD),
toml_table.get(BUILD_METHOD_FIELD),
toml_table.get(COMPATIBLE_RUNTIMES_FIELD),
toml_table.get(ARCHITECTURE_FIELD, X86_64),
toml_table.get(SOURCE_HASH_FIELD, ""),
toml_table.get(MANIFEST_HASH_FIELD, ""),
dict(toml_table.get(ENV_VARS_FIELD, {})),
)
layer_build_definition.uuid = uuid
return layer_build_definition
class BuildHashingInformation(NamedTuple):
"""
Holds hashing information for the source folder and the manifest file
"""
source_hash: str
manifest_hash: str
class BuildGraph:
"""
Contains list of build definitions, with ability to read and write them into build.toml file
"""
# private lock for build.toml reads and writes
__toml_lock = threading.Lock()
# global table build definitions key
FUNCTION_BUILD_DEFINITIONS = "function_build_definitions"
LAYER_BUILD_DEFINITIONS = "layer_build_definitions"
def __init__(self, build_dir: str) -> None:
# put build.toml file inside .aws-sam folder
self._filepath = Path(build_dir).parent.joinpath(DEFAULT_BUILD_GRAPH_FILE_NAME)
self._function_build_definitions: List["FunctionBuildDefinition"] = []
self._layer_build_definitions: List["LayerBuildDefinition"] = []
self._atomic_read()
def get_function_build_definitions(self) -> Tuple["FunctionBuildDefinition", ...]:
return tuple(self._function_build_definitions)
def get_layer_build_definitions(self) -> Tuple["LayerBuildDefinition", ...]:
return tuple(self._layer_build_definitions)
def get_function_build_definition_with_full_path(
self, function_full_path: str
) -> Optional["FunctionBuildDefinition"]:
"""
Returns the FunctionBuildDefinition instance for the given function full path.
Parameters
----------
function_full_path : str
Function full path that will be searched in the function build definitions
Returns
-------
Optional[FunctionBuildDefinition]
If a function build definition found returns it, otherwise returns None
"""
for function_build_definition in self._function_build_definitions:
for build_definition_function in function_build_definition.functions:
if build_definition_function.full_path == function_full_path:
return function_build_definition
return None
def put_function_build_definition(
self, function_build_definition: "FunctionBuildDefinition", function: Function
) -> None:
"""
Puts the newly read function build definition into the existing build graph.
If the graph already contains a function build definition that is the same as the newly passed one, the
function is added to the existing definition and the new one is discarded.
If the graph doesn't contain such a build definition, the new one is added to the current build graph.
Parameters
----------
function_build_definition: FunctionBuildDefinition
function build definition which is newly read from template.yaml file
function: Function
function details for this function build definition
"""
if function_build_definition in self._function_build_definitions:
previous_build_definition = self._function_build_definitions[
self._function_build_definitions.index(function_build_definition)
]
LOG.debug(
"Same function build definition found, adding function (Previous: %s, Current: %s, Function: %s)",
previous_build_definition,
function_build_definition,
function,
)
previous_build_definition.add_function(function)
else:
LOG.debug(
"Unique function build definition found, adding as new (Function Build Definition: %s, Function: %s)",
function_build_definition,
function,
)
function_build_definition.add_function(function)
self._function_build_definitions.append(function_build_definition)
def put_layer_build_definition(self, layer_build_definition: "LayerBuildDefinition", layer: LayerVersion) -> None:
"""
Puts the newly read layer build definition into the existing build graph.
If the graph already contains a layer build definition that is the same as the newly passed one, the
layer is added to the existing definition and the new one is discarded.
If the graph doesn't contain such a build definition, the new one is added to the current build graph.
Parameters
----------
layer_build_definition: LayerBuildDefinition
layer build definition which is newly read from template.yaml file
layer: Layer
layer details for this layer build definition
"""
if layer_build_definition in self._layer_build_definitions:
previous_build_definition = self._layer_build_definitions[
self._layer_build_definitions.index(layer_build_definition)
]
LOG.debug(
"Same Layer build definition found, adding layer (Previous: %s, Current: %s, Layer: %s)",
previous_build_definition,
layer_build_definition,
layer,
)
previous_build_definition.layer = layer
else:
LOG.debug(
"Unique Layer build definition found, adding as new (Layer Build Definition: %s, Layer: %s)",
layer_build_definition,
layer,
)
layer_build_definition.layer = layer
self._layer_build_definitions.append(layer_build_definition)
def clean_redundant_definitions_and_update(self, persist: bool) -> None:
"""
Removes build definitions that don't have any function in them; such build definitions
are no longer used and can be deleted.
If the persist parameter is True, the build graph is written to the .aws-sam/build.toml file.
"""
self._function_build_definitions[:] = [
fbd for fbd in self._function_build_definitions if len(fbd.functions) > 0
]
self._layer_build_definitions[:] = [bd for bd in self._layer_build_definitions if bd.layer]
if persist:
self._atomic_write()
def update_definition_hash(self) -> None:
"""
Updates the build.toml file with the newest source_hash values of the partial build's definitions.
This operation is atomic: no other thread accesses build.toml
while the hash values are being read and modified.
"""
with BuildGraph.__toml_lock:
stored_function_definitions = copy.deepcopy(self._function_build_definitions)
stored_layer_definitions = copy.deepcopy(self._layer_build_definitions)
self._read()
function_content = BuildGraph._compare_hash_changes(
stored_function_definitions, self._function_build_definitions
)
layer_content = BuildGraph._compare_hash_changes(stored_layer_definitions, self._layer_build_definitions)
if function_content or layer_content:
self._write_source_hash(function_content, layer_content)
self._function_build_definitions = stored_function_definitions
self._layer_build_definitions = stored_layer_definitions
@staticmethod
def _compare_hash_changes(
input_list: Sequence["AbstractBuildDefinition"], compared_list: Sequence["AbstractBuildDefinition"]
) -> Dict[str, BuildHashingInformation]:
"""
Helper to compare hash-value changes between function and layer definitions.
Returns a dictionary keyed by uuid, with the updated hashing information as value.
"""
content = {}
for compared_def in compared_list:
for stored_def in input_list:
if stored_def == compared_def:
old_hash = compared_def.source_hash
updated_hash = stored_def.source_hash
old_manifest_hash = compared_def.manifest_hash
updated_manifest_hash = stored_def.manifest_hash
uuid = stored_def.uuid
if old_hash != updated_hash or old_manifest_hash != updated_manifest_hash:
content[uuid] = BuildHashingInformation(updated_hash, updated_manifest_hash)
compared_def.download_dependencies = old_manifest_hash != updated_manifest_hash
return content
def _write_source_hash(
self, function_content: Dict[str, BuildHashingInformation], layer_content: Dict[str, BuildHashingInformation]
) -> None:
"""
Helper to write source_hash values to build.toml file
"""
document = {}
if not self._filepath.exists():
open(self._filepath, "a+").close() # pylint: disable=consider-using-with
txt = self._filepath.read_text()
# .loads() returns a TOMLDocument,
# and it behaves like a standard dictionary according to https://github.com/sdispater/tomlkit.
# in tomlkit 0.7.2, the types are broken (tomlkit#128, #130, #134) so here we convert it to Dict.
document = cast(Dict, tomlkit.loads(txt))
for function_uuid, hashing_info in function_content.items():
if function_uuid in document.get(BuildGraph.FUNCTION_BUILD_DEFINITIONS, {}):
function_build_definition = document[BuildGraph.FUNCTION_BUILD_DEFINITIONS][function_uuid]
function_build_definition[SOURCE_HASH_FIELD] = hashing_info.source_hash
function_build_definition[MANIFEST_HASH_FIELD] = hashing_info.manifest_hash
LOG.info(
"Updated source_hash and manifest_hash field in build.toml for function with UUID %s", function_uuid
)
for layer_uuid, hashing_info in layer_content.items():
if layer_uuid in document.get(BuildGraph.LAYER_BUILD_DEFINITIONS, {}):
layer_build_definition = document[BuildGraph.LAYER_BUILD_DEFINITIONS][layer_uuid]
layer_build_definition[SOURCE_HASH_FIELD] = hashing_info.source_hash
layer_build_definition[MANIFEST_HASH_FIELD] = hashing_info.manifest_hash
LOG.info("Updated source_hash and manifest_hash field in build.toml for layer with UUID %s", layer_uuid)
self._filepath.write_text(tomlkit.dumps(document)) # type: ignore
def _read(self) -> None:
"""
Reads the build.toml file into an array of build definitions.
Each build definition starts with an empty function list, which is populated from the current template.yaml file.
"""
LOG.debug("Instantiating build definitions")
self._function_build_definitions = []
self._layer_build_definitions = []
document = {}
try:
txt = self._filepath.read_text()
# .loads() returns a TOMLDocument,
# and it behaves like a standard dictionary according to https://github.com/sdispater/tomlkit.
# in tomlkit 0.7.2, the types are broken (tomlkit#128, #130, #134) so here we convert it to Dict.
document = cast(Dict, tomlkit.loads(txt))
except OSError:
LOG.debug("No previous build graph found, generating new one")
function_build_definitions_table = document.get(BuildGraph.FUNCTION_BUILD_DEFINITIONS, {})
for function_build_definition_key in function_build_definitions_table:
function_build_definition = _toml_table_to_function_build_definition(
function_build_definition_key, function_build_definitions_table[function_build_definition_key]
)
self._function_build_definitions.append(function_build_definition)
layer_build_definitions_table = document.get(BuildGraph.LAYER_BUILD_DEFINITIONS, {})
for layer_build_definition_key in layer_build_definitions_table:
layer_build_definition = _toml_table_to_layer_build_definition(
layer_build_definition_key, layer_build_definitions_table[layer_build_definition_key]
)
self._layer_build_definitions.append(layer_build_definition)
def _atomic_read(self) -> None:
"""
Performs the _read() method with the global lock acquired.
It makes sure no other thread accesses build.toml while a read is happening.
"""
with BuildGraph.__toml_lock:
self._read()
def _write(self) -> None:
"""
Writes build definition details into the build.toml file, which will be used by the next build.
The build.toml file contains the same information as the build graph;
function details are only preserved as function names and
layer details are only preserved as layer names.
"""
# convert build definition list into toml table
function_build_definitions_table = tomlkit.table()
for function_build_definition in self._function_build_definitions:
build_definition_as_table = _function_build_definition_to_toml_table(function_build_definition)
function_build_definitions_table.add(function_build_definition.uuid, build_definition_as_table)
layer_build_definitions_table = tomlkit.table()
for layer_build_definition in self._layer_build_definitions:
build_definition_as_table = _layer_build_definition_to_toml_table(layer_build_definition)
layer_build_definitions_table.add(layer_build_definition.uuid, build_definition_as_table)
# create toml document and add build definitions
document = tomlkit.document()
document.add(tomlkit.comment("This file is auto generated by SAM CLI build command"))
# we need to cast `Table` to `Item` because of tomlkit#135.
document.add(BuildGraph.FUNCTION_BUILD_DEFINITIONS, cast(tomlkit.items.Item, function_build_definitions_table))
document.add(BuildGraph.LAYER_BUILD_DEFINITIONS, cast(tomlkit.items.Item, layer_build_definitions_table))
if not self._filepath.exists():
open(self._filepath, "a+").close() # pylint: disable=consider-using-with
self._filepath.write_text(tomlkit.dumps(document))
def _atomic_write(self) -> None:
"""
Performs the _write() method with the global lock acquired.
It makes sure no other thread accesses build.toml while a write is happening.
"""
with BuildGraph.__toml_lock:
self._write()
class AbstractBuildDefinition:
"""
Abstract class for build definition
Build definition holds information about each unique build
"""
def __init__(
self, source_hash: str, manifest_hash: str, env_vars: Optional[Dict] = None, architecture: str = X86_64
) -> None:
self.uuid = str(uuid4())
self.source_hash = source_hash
self.manifest_hash = manifest_hash
self._env_vars = env_vars if env_vars else {}
self.architecture = architecture
# following properties are used during build time and they don't serialize into build.toml file
self.download_dependencies: bool = True
@property
def dependencies_dir(self) -> str:
return str(os.path.join(DEFAULT_DEPENDENCIES_DIR, self.uuid))
@property
def env_vars(self) -> Dict:
return deepcopy(self._env_vars)
class LayerBuildDefinition(AbstractBuildDefinition):
"""
LayerBuildDefinition holds information about each unique layer build
"""
def __init__(
self,
full_path: str,
codeuri: Optional[str],
build_method: Optional[str],
compatible_runtimes: Optional[List[str]],
architecture: str,
source_hash: str = "",
manifest_hash: str = "",
env_vars: Optional[Dict] = None,
):
super().__init__(source_hash, manifest_hash, env_vars, architecture)
self.full_path = full_path
self.codeuri = codeuri
self.build_method = build_method
self.compatible_runtimes = compatible_runtimes
# Note(xinhol): In our code, we assume "layer" is never None. We should refactor
# this and move "layer" out of LayerBuildDefinition to take advantage of type check.
self.layer: LayerVersion = None # type: ignore
def __str__(self) -> str:
return (
f"LayerBuildDefinition({self.full_path}, {self.codeuri}, {self.source_hash}, {self.uuid}, "
f"{self.build_method}, {self.compatible_runtimes}, {self.architecture}, {self.env_vars})"
)
def __eq__(self, other: Any) -> bool:
"""
Checks equality of the layer build definition
Parameters
----------
other: Any
other layer build definition to compare
Returns
-------
bool
True if both layer build definitions has same following properties, False otherwise
"""
if not isinstance(other, LayerBuildDefinition):
return False
return (
self.full_path == other.full_path
and self.codeuri == other.codeuri
and self.build_method == other.build_method
and self.compatible_runtimes == other.compatible_runtimes
and self.env_vars == other.env_vars
and self.architecture == other.architecture
)
class FunctionBuildDefinition(AbstractBuildDefinition):
"""
FunctionBuildDefinition holds information about each unique function build
"""
def __init__(
self,
runtime: Optional[str],
codeuri: Optional[str],
packagetype: str,
architecture: str,
metadata: Optional[Dict],
handler: Optional[str],
source_hash: str = "",
manifest_hash: str = "",
env_vars: Optional[Dict] = None,
) -> None:
super().__init__(source_hash, manifest_hash, env_vars, architecture)
self.runtime = runtime
self.codeuri = codeuri
self.packagetype = packagetype
self.handler = handler
# Skip SAM Added metadata properties
metadata_copied = deepcopy(metadata) if metadata else {}
metadata_copied.pop(SAM_RESOURCE_ID_KEY, "")
metadata_copied.pop(SAM_IS_NORMALIZED, "")
self.metadata = metadata_copied
self.functions: List[Function] = []
def add_function(self, function: Function) -> None:
self.functions.append(function)
def get_function_name(self) -> str:
self._validate_functions()
return self.functions[0].name
def get_handler_name(self) -> Optional[str]:
self._validate_functions()
return self.functions[0].handler
def get_full_path(self) -> str:
"""
Return the build identifier of the first function
"""
self._validate_functions()
return self.functions[0].full_path
def get_build_dir(self, artifact_root_dir: str) -> str:
"""
Return the directory path relative to root build directory
"""
self._validate_functions()
return self.functions[0].get_build_dir(artifact_root_dir)
def _validate_functions(self) -> None:
if not self.functions:
raise InvalidBuildGraphException("Build definition doesn't have any function definition to build")
def __str__(self) -> str:
return (
"BuildDefinition("
f"{self.runtime}, {self.codeuri}, {self.packagetype}, {self.source_hash}, "
f"{self.uuid}, {self.metadata}, {self.env_vars}, {self.architecture}, "
f"{[f.functionname for f in self.functions]})"
)
def __eq__(self, other: Any) -> bool:
"""
Checks equality of the function build definition
Parameters
----------
other: Any
other function build definition to compare
Returns
-------
bool
True if both function build definitions has same following properties, False otherwise
"""
if not isinstance(other, FunctionBuildDefinition):
return False
# each build with custom Makefile definition should be handled separately
if self.metadata and self.metadata.get("BuildMethod", None) == "makefile":
return False
if self.metadata and self.metadata.get("BuildMethod", None) == "esbuild":
# For esbuild, we need to check if handlers within the same CodeUri are the same
# if they are different, it should create a separate build definition
if self.handler != other.handler:
return False
return (
self.runtime == other.runtime
and self.codeuri == other.codeuri
and self.packagetype == other.packagetype
and self.metadata == other.metadata
and self.env_vars == other.env_vars
and self.architecture == other.architecture
)
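# A minimal round-trip sketch (an illustrative addition, not part of the original
# module; the helper name and values are assumptions): serialize a
# FunctionBuildDefinition into its toml table and parse it back, relying on the
# __eq__ above, which does not compare uuid or hash fields.
def _example_toml_roundtrip() -> None:
    definition = FunctionBuildDefinition(
        runtime="python3.9",
        codeuri="HelloWorld/",
        packagetype=ZIP,
        architecture=X86_64,
        metadata=None,
        handler="app.lambda_handler",
    )
    table = _function_build_definition_to_toml_table(definition)
    restored = _toml_table_to_function_build_definition(definition.uuid, table)
    assert restored == definition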
|
the-stack_0_4814 | '''
userManager for Docklet
provides a class for managing users and user groups in Docklet
Warning: in some early versions, "token" stood for an instance of the class model.User;
now it stands for a string that can be parsed to get that instance.
In all functions decorated with "@administration_required" or "@administration_or_self_required", "token" is the instance
Original author: Liu Peidong
'''
from model import db, User, UserGroup, Notification, UserUsage
from functools import wraps
import os, subprocess, math
import hashlib
import pam
from base64 import b64encode
import env
from settings import settings
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from datetime import datetime
import json
from log import logger
from lvmtool import *
PAM = pam.pam()
fspath = env.getenv('FS_PREFIX')
data_quota = env.getenv('DATA_QUOTA')
data_quota_cmd = env.getenv('DATA_QUOTA_CMD')
if (env.getenv('EXTERNAL_LOGIN').lower() == 'true'):
from plugin import external_receive
def administration_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if ( ('cur_user' in kwargs) == False):
return {"success":'false', "reason":"Cannot get cur_user"}
cur_user = kwargs['cur_user']
if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root')):
return func(*args, **kwargs)
else:
return {"success": 'false', "reason": 'Unauthorized Action'}
return wrapper
def administration_or_self_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if ( (not ('cur_user' in kwargs)) or (not ('user' in kwargs))):
return {"success":'false', "reason":"Cannot get cur_user or user"}
cur_user = kwargs['cur_user']
user = kwargs['user']
if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root') or (cur_user.username == user.username)):
return func(*args, **kwargs)
else:
return {"success": 'false', "reason": 'Unauthorized Action'}
return wrapper
def token_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if ( ('cur_user' in kwargs) == False):
return {"success":'false', "reason":"Cannot get cur_user"}
return func(*args, **kwargs)
return wrapper
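# A hedged usage sketch (illustrative only; the caller and variable names below
# are assumptions, not code from this project): the REST layer is expected to
# resolve the token string into a model.User instance first and pass it as
# cur_user to the decorated methods, e.g.
#
#   cur_user = usermgr.auth_token(form.get("token"))
#   result = usermgr.selfQuery(cur_user = cur_user)
#
# so that "cur_user" inside every decorated method is already the User instance.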
def send_activated_email(to_address, username):
email_from_address = settings.get('EMAIL_FROM_ADDRESS')
if (email_from_address in ['\'\'', '\"\"', '']):
return
#text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
text = '<html><h4>Dear '+ username + ':</h4>'
text += '''<p> Your account in <a href='%s'>%s</a> has been activated</p>
<p> Enjoy your personal workspace in the cloud !</p>
<br>
<p> Note: DO NOT reply to this email!</p>
<br><br>
<p> <a href='http://docklet.unias.org'>Docklet Team</a>, SEI, PKU</p>
''' % (env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
text += '<p>'+ str(datetime.now()) + '</p>'
text += '</html>'
subject = 'Docklet account activated'
msg = MIMEMultipart()
textmsg = MIMEText(text,'html','utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = email_from_address
msg['To'] = to_address
msg.attach(textmsg)
s = smtplib.SMTP()
s.connect()
s.sendmail(email_from_address, to_address, msg.as_string())
s.close()
def send_remind_activating_email(username):
#admin_email_address = env.getenv('ADMIN_EMAIL_ADDRESS')
nulladdr = ['\'\'', '\"\"', '']
email_from_address = settings.get('EMAIL_FROM_ADDRESS')
admin_email_address = settings.get('ADMIN_EMAIL_ADDRESS')
if (email_from_address in nulladdr or admin_email_address in nulladdr):
return
#text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
text = '<html><h4>Dear '+ 'admin' + ':</h4>'
text += '''<p> An activating request for %s in <a href='%s'>%s</a> has been sent</p>
<p> Please check it !</p>
<br/><br/>
<p> Docklet Team, SEI, PKU</p>
''' % (username, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
text += '<p>'+ str(datetime.utcnow()) + '</p>'
text += '</html>'
subject = 'An activating request in Docklet has been sent'
if admin_email_address[0] == '"':
admins_addr = admin_email_address[1:-1].split(" ")
else:
admins_addr = admin_email_address.split(" ")
alladdr=""
for addr in admins_addr:
alladdr = alladdr+addr+", "
alladdr=alladdr[:-2]
msg = MIMEMultipart()
textmsg = MIMEText(text,'html','utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = email_from_address
msg['To'] = alladdr
msg.attach(textmsg)
s = smtplib.SMTP()
s.connect()
try:
s.sendmail(email_from_address, admins_addr, msg.as_string())
except Exception as e:
logger.error(e)
s.close()
class userManager:
def __init__(self, username = 'root', password = None):
'''
Try to create the database when there is none;
initialize the 'root' user and the 'root' & 'primary' groups
'''
try:
User.query.all()
except:
db.create_all()
if password == None:
#set a random password
password = os.urandom(16)
password = b64encode(password).decode('utf-8')
fsdir = env.getenv('FS_PREFIX')
f = open(fsdir + '/local/generated_password.txt', 'w')
f.write("User=%s\nPass=%s\n"%(username, password))
f.close()
sys_admin = User(username, hashlib.sha512(password.encode('utf-8')).hexdigest())
sys_admin.status = 'normal'
sys_admin.nickname = 'root'
sys_admin.description = 'Root_User'
sys_admin.user_group = 'root'
sys_admin.auth_method = 'local'
db.session.add(sys_admin)
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", username])
db.session.commit()
if not os.path.exists(fspath+"/global/sys/quota"):
groupfile = open(fspath+"/global/sys/quota",'w')
groups = []
groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groupfile.write(json.dumps(groups))
groupfile.close()
if not os.path.exists(fspath+"/global/sys/quotainfo"):
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotas = {}
quotas['default'] = 'foundation'
quotas['quotainfo'] = []
quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'})
quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 4000'})
quotas['quotainfo'].append({'name':'disk', 'hint':'the disk quota, number of MB, e.g. 4000'})
quotas['quotainfo'].append({'name':'data', 'hint':'the quota of data space, number of GB, e.g. 100'})
quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can save, e.g. 10'})
quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'})
quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'})
quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'})
quotas['quotainfo'].append({'name':'input_rate_limit', 'hint':'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'})
quotas['quotainfo'].append({'name':'output_rate_limit', 'hint':'the egress speed of the network, number of kbps. 0 means the rate are unlimited.'})
quotafile.write(json.dumps(quotas))
quotafile.close()
if not os.path.exists(fspath+"/global/sys/lxc.default"):
settingfile = open(fspath+"/global/sys/lxc.default", 'w')
settings = {}
settings['cpu'] = "2"
settings["memory"] = "2000"
settings["disk"] = "2000"
settingfile.write(json.dumps(settings))
settingfile.close()
try:
UserUsage.query.all()
except:
db.create_all()
def auth_local(self, username, password):
password = hashlib.sha512(password.encode('utf-8')).hexdigest()
user = User.query.filter_by(username = username).first()
if (user == None):
return {"success":'false', "reason": "User did not exist"}
if (user.password != password):
return {"success":'false', "reason": "Wrong password"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth_pam(self, username, password):
user = User.query.filter_by(username = username).first()
pamresult = PAM.authenticate(username, password)
if (pamresult == False or (user != None and user.auth_method != 'pam')):
return {"success":'false', "reason": "Wrong password or wrong login method"}
if (user == None):
newuser = self.newuser();
newuser.username = username
newuser.password = "no_password"
newuser.nickname = username
newuser.status = "init"
newuser.user_group = "primary"
newuser.auth_method = "pam"
self.register(user = newuser)
user = User.query.filter_by(username = username).first()
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth_external(self, form):
if (env.getenv('EXTERNAL_LOGIN') != 'True'):
failed_result = {'success': 'false', 'reason' : 'external auth disabled'}
return failed_result
result = external_receive.external_auth_receive_request(form)
if (result['success'] != 'True'):
failed_result = {'success':'false', 'result': result}
return failed_result
username = result['username']
user = User.query.filter_by(username = username).first()
if (user != None and user.auth_method == result['auth_method']):
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
if (user != None and user.auth_method != result['auth_method']):
result = {'success': 'false', 'reason': 'other kinds of account already exists'}
return result
#user == None , register an account for external user
newuser = self.newuser();
newuser.username = result['username']
newuser.password = result['password']
newuser.avatar = result['avatar']
newuser.nickname = result['nickname']
newuser.description = result['description']
newuser.e_mail = result['e_mail']
newuser.truename = result['truename']
newuser.student_number = result['student_number']
newuser.status = result['status']
newuser.user_group = result['user_group']
newuser.auth_method = result['auth_method']
newuser.department = result['department']
newuser.tel = result['tel']
self.register(user = newuser)
user = User.query.filter_by(username = username).first()
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth(self, username, password):
'''
authenticate a user by username & password
return a token as well as some user information
'''
user = User.query.filter_by(username = username).first()
if (user == None or user.auth_method =='pam'):
return self.auth_pam(username, password)
elif (user.auth_method == 'local'):
return self.auth_local(username, password)
else:
result = {'success':'false', 'reason':'auth_method error'}
return result
def auth_token(self, token):
'''
authenticate a user by a token
on success, return the corresponding User record
otherwise return None
'''
user = User.verify_auth_token(token)
return user
def set_nfs_quota_bygroup(self,groupname, quota):
if not data_quota == "True":
return
users = User.query.filter_by(user_group = groupname).all()
for user in users:
self.set_nfs_quota(user.username, quota)
def set_nfs_quota(self, username, quota):
if not data_quota == "True":
return
nfspath = "/users/%s/data" % username
try:
cmd = data_quota_cmd % (nfspath,quota+"GB")
sys_run(cmd.strip('"'))
except Exception as e:
logger.error(e)
@administration_required
def query(*args, **kwargs):
'''
Usage: query(username = 'xxx', cur_user = token_from_auth)
|| query(ID = a_integer, cur_user = token_from_auth)
Provide information about one user that administrators need to use
'''
if ( 'ID' in kwargs):
user = User.query.filter_by(id = kwargs['ID']).first()
if (user == None):
return {"success":False, "reason":"User does not exist"}
result = {
"success":'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"description" : user.description,
"beans" : user.beans,
},
"token": user
}
return result
if ( 'username' not in kwargs):
return {"success":'false', "reason":"Cannot get 'username'"}
username = kwargs['username']
user = User.query.filter_by(username = username).first()
if (user == None):
return {"success":'false', "reason":"User does not exist"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"beans" : user.beans,
},
"token": user
}
return result
@token_required
def selfQuery(*args, **kwargs):
'''
Usage: selfQuery(cur_user = token_from_auth)
List informantion for oneself
'''
user = kwargs['cur_user']
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
group = None
for one_group in groups:
if one_group['name'] == user.user_group:
group = one_group['quotas']
break
else:
for one_group in groups:
if one_group['name'] == "primary":
group = one_group['quotas']
break
result = {
"success": 'true',
"data":{
"username" : user.username,
"id": user.id,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"groupinfo": group,
"beans" : user.beans,
"auth_method": user.auth_method,
},
}
return result
@token_required
def selfModify(*args, **kwargs):
'''
Usage: selfModify(cur_user = token_from_auth, newValue = form)
Modify information about oneself
'''
form = kwargs['newValue']
name = form.get('name', None)
value = form.get('value', None)
if (name == None or value == None):
result = {'success': 'false'}
return result
user = User.query.filter_by(username = kwargs['cur_user'].username).first()
if (name == 'nickname'):
user.nickname = value
elif (name == 'description'):
user.description = value
elif (name == 'department'):
user.department = value
elif (name == 'e_mail'):
user.e_mail = value
elif (name == 'tel'):
user.tel = value
elif (name == 'password'):
old_password = hashlib.sha512(form.get('old_value', '').encode('utf-8')).hexdigest()
if (user.password != old_password):
result = {'success': 'false'}
return result
user.password = hashlib.sha512(value.encode('utf-8')).hexdigest()
else:
result = {'success': 'false'}
return result
db.session.commit()
result = {'success': 'true'}
return result
@token_required
def usageQuery(self, *args, **kwargs):
'''
Usage: usageQuery(cur_user = token_from_auth)
Query the quota and usage of user
'''
cur_user = kwargs['cur_user']
groupname = cur_user.user_group
groupinfo = self.groupQuery(name = groupname)['data']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usageinfo = {
'username': cur_user.username,
'cpu': '0',
'memory': '0',
'disk': '0'
}
else:
usageinfo = {
'username': usage.username,
'cpu': usage.cpu,
'memory': usage.memory,
'disk': usage.disk
}
settingfile = open(fspath+"/global/sys/lxc.default" , 'r')
defaultsetting = json.loads(settingfile.read())
settingfile.close()
return {'success': 'true', 'quota' : groupinfo, 'usage' : usageinfo, 'default': defaultsetting }
@token_required
def usageInc(self, *args, **kwargs):
'''
Usage: usageInc(cur_user = token_from_auth, modification = data_from_form)
Increase the usage info of the user
'''
cur_user = kwargs['cur_user']
modification = kwargs['modification']
logger.info("record usage for user:%s" % cur_user.username)
groupname = cur_user.user_group
groupinfo = self.groupQuery(name = groupname)['data']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if int(modification['cpu']) <= 0 or int(modification['memory']) <= 0 or int(modification['disk']) <= 0:
return {'success':False, 'result':"cpu,memory and disk setting cannot less than zero"}
cpu = int(usage.cpu) + int(modification['cpu'])
memory = int(usage.memory) + int(modification['memory'])
disk = int(usage.disk) + int(modification['disk'])
if cpu > int(groupinfo['cpu']):
logger.error("cpu quota exceed, user:%s" % cur_user.username)
return {'success':False, 'result':"cpu quota exceed"}
if memory > int(groupinfo['memory']):
logger.error("memory quota exceed, user:%s" % cur_user.username)
return {'success':False, 'result':"memory quota exceed"}
if disk > int(groupinfo['disk']):
logger.error("disk quota exceed, user:%s" % cur_user.username)
return {'success':False, 'result':"disk quota exceed"}
usage.cpu = str(cpu)
usage.memory = str(memory)
usage.disk = str(disk)
db.session.commit()
return {'success':True, 'result':"distribute the resource"}
@token_required
def usageRecover(self, *args, **kwargs):
'''
Usage: usageRecover(cur_user = token_from_auth, modification = data_from_form)
Recover the usage info when creating a container failed
'''
cur_user = kwargs['cur_user']
modification = kwargs['modification']
logger.info("recover usage for user:%s" % cur_user.username)
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usage = UserUsage.query.filter_by(username = cur_user.username).first()
return True
cpu = int(usage.cpu) - int(modification['cpu'])
memory = int(usage.memory) - int(modification['memory'])
disk = int(usage.disk) - int(modification['disk'])
if cpu < 0:
cpu = 0
if memory < 0:
memory = 0
if disk < 0:
disk = 0
usage.cpu = str(cpu)
usage.memory = str(memory)
usage.disk = str(disk)
db.session.commit()
return {'success':True}
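# usageRelease(cur_user = token_from_auth, cpu = ..., memory = ..., disk = ...):
# subtract the released cpu/memory/disk amounts from the user's recorded usage
# (clamped at zero); the counterpart of usageInc, used when resources are freed.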
@token_required
def usageRelease(self, *args, **kwargs):
cur_user = kwargs['cur_user']
cpu = kwargs['cpu']
memory = kwargs['memory']
disk = kwargs['disk']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
return {'success':True}
nowcpu = int(usage.cpu) - int(cpu)
nowmemory = int(usage.memory) - int(memory)
nowdisk = int(usage.disk) - int(disk)
if nowcpu < 0:
nowcpu = 0
if nowmemory < 0:
nowmemory = 0
if nowdisk < 0:
nowdisk = 0
usage.cpu = str(nowcpu)
usage.memory = str(nowmemory)
usage.disk = str(nowdisk)
db.session.commit()
return {'success':True}
def initUsage(*args, **kwargs):
"""
initialize the usage records when starting docklet in init mode
"""
usages = UserUsage.query.all()
for usage in usages:
usage.cpu = "0"
usage.memory = "0"
usage.disk = "0"
db.session.commit()
return True
@administration_required
def userList(*args, **kwargs):
'''
Usage: userList(cur_user = token_from_auth)
List all users for an administrator
'''
alluser = User.query.all()
result = {
"success": 'true',
"data":[]
}
for user in alluser:
userinfo = [
user.id,
user.username,
user.truename,
user.e_mail,
user.tel,
"%s"%(user.register_date),
user.status,
user.user_group,
user.beans,
'',
]
result["data"].append(userinfo)
return result
@administration_required
def groupList(*args, **kwargs):
'''
Usage: groupList(cur_user = token_from_auth)
List all groups for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
result = {
"success": 'true',
"groups": groups,
"quotas": quotas['quotainfo'],
"default": quotas['default'],
}
return result
@administration_required
def change_default_group(*args, **kwargs):
form = kwargs['form']
default_group = form.get('defaultgroup')
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
quotas['default'] = default_group
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotafile.write(json.dumps(quotas))
quotafile.close()
return { 'success':'true', 'action':'change default group' }
def groupQuery(self, *args, **kwargs):
'''
Usage: groupQuery(name = XXX, cur_user = token_from_auth)
Query the quota settings of one group by its name
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['name']:
result = {
"success":'true',
"data": group['quotas'],
}
return result
else:
return {"success":False, "reason":"Group does not exist"}
@administration_required
def groupListName(*args, **kwargs):
'''
Usage: groupListName(cur_user = token_from_auth)
List all group names for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
result = {
"groups": [],
}
for group in groups:
result["groups"].append(group['name'])
return result
@administration_required
def groupModify(self, *args, **kwargs):
'''
Usage: groupModify(newValue = dict_from_form, cur_user = token_from_auth)
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['newValue'].get('groupname',None):
form = kwargs['newValue']
for key in form.keys():
if key == "data":
if not group['quotas'][key] == form.get(key):
self.set_nfs_quota_bygroup(group['name'],form.get(key))
else:
pass
if key == "groupname" or key == "token":
pass
else:
if key == "vnode":
vnode = int(form['vnode'])
val = str(2**(round(math.log(vnode+3, 2))) - 3 )
group["quotas"][key] = val
else:
group['quotas'][key] = form.get(key)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
else:
return {"success":'false', "reason":"UserGroup does not exist"}
@administration_required
def modify(self, *args, **kwargs):
'''
modify a user's information in the database
will send an e-mail when status is changed from 'applying' to 'normal'
Usage: modify(newValue = dict_from_form, cur_user = token_from_auth)
'''
if ( kwargs['newValue'].get('Instruction', '') == 'Activate'):
user_modify = User.query.filter_by(id = kwargs['newValue'].get('ID', None)).first()
user_modify.status = 'normal'
send_activated_email(user_modify.e_mail, user_modify.username)
db.session.commit()
return {"success": "true"}
if ( kwargs['newValue'].get('password', '') != ''):
user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
new_password = kwargs['newValue'].get('password','')
new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
user_modify.password = new_password
db.session.commit()
return {"success": "true"}
user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
if (user_modify == None):
return {"success":'false', "reason":"User does not exist"}
#try:
form = kwargs['newValue']
user_modify.truename = form.get('truename', '')
user_modify.e_mail = form.get('e_mail', '')
user_modify.department = form.get('department', '')
user_modify.student_number = form.get('student_number', '')
user_modify.tel = form.get('tel', '')
user_modify.user_group = form.get('group', '')
user_modify.auth_method = form.get('auth_method', '')
if (user_modify.status == 'applying' and form.get('status', '') == 'normal'):
send_activated_email(user_modify.e_mail, user_modify.username)
user_modify.status = form.get('status', '')
#if (form.get('password', '') != ''):
#new_password = form.get('password','')
#new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
#user_modify.password = new_password
#self.chpassword(cur_user = user_modify, password = form.get('password','no_password'))
#modify password in another function now
db.session.commit()
res = self.groupQuery(name=user_modify.user_group)
if res['success']:
self.set_nfs_quota(user_modify.username,res['data']['data'])
return {"success":'true'}
#except:
#return {"success":'false', "reason":"Something happened"}
@token_required
def chpassword(*args, **kwargs):
'''
Usage: chpassword(cur_user = token_from_auth, password = 'your_password')
'''
cur_user = kwargs['cur_user']
cur_user.password = hashlib.sha512(kwargs['password'].encode('utf-8')).hexdigest()
def newuser(*args, **kwargs):
'''
Usage : newuser()
The only method to create a new user
call this method first, modify the returned database row instance, then call self.register()
'''
user_new = User('newuser', 'asdf1234')
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
user_new.user_group = quotas['default']
user_new.avatar = 'default.png'
return user_new
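# Minimal usage sketch (assumes a userManager instance `um` and a registration
# form; the attribute values below are placeholders):
#   user_new = um.newuser()
#   user_new.username = form.get('username')
#   user_new.password = form.get('password')   # plain text here; register() hashes it
#   user_new.e_mail = form.get('email')
#   um.register(user = user_new)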
def register(self, *args, **kwargs):
'''
Usage: register(user = modified_from_newuser())
'''
if (kwargs['user'].username == None or kwargs['user'].username == ''):
return {"success":'false', "reason": "Empty username"}
user_check = User.query.filter_by(username = kwargs['user'].username).first()
if (user_check != None and user_check.status != "init"):
#for the activating form
return {"success":'false', "reason": "Unauthorized action"}
newuser = kwargs['user']
if (user_check != None and (user_check.status == "init")):
db.session.delete(user_check)
db.session.commit()
else:
newuser.password = hashlib.sha512(newuser.password.encode('utf-8')).hexdigest()
db.session.add(newuser)
db.session.commit()
# if newuser status is normal, init some data for this user
# now initialize for all kind of users
#if newuser.status == 'normal':
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", newuser.username])
res = self.groupQuery(name=newuser.user_group)
if res['success']:
self.set_nfs_quota(newuser.username,res['data']['data'])
return {"success":'true'}
@administration_required
def quotaadd(*args, **kwargs):
form = kwargs.get('form')
quotaname = form.get("quotaname")
default_value = form.get("default_value")
hint = form.get("hint")
if (quotaname == None):
return { "success":'false', "reason": "Empty quota name"}
if (default_value == None):
default_value = "--"
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
group['quotas'][quotaname] = default_value
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
quotas['quotainfo'].append({'name':quotaname, 'hint':hint})
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotafile.write(json.dumps(quotas))
quotafile.close()
return {"success":'true'}
@administration_required
def groupadd(*args, **kwargs):
form = kwargs.get('form')
groupname = form.get("groupname")
if (groupname == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
group = {
'name': groupname,
'quotas': {}
}
for key in form.keys():
if key == "groupname" or key == "token":
pass
else:
if key == "vnode":
vnode = int(form['vnode'])
val = str(2**(round(math.log(vnode+3, 2))) - 3 )
group['quotas'][key] = val
else:
group['quotas'][key] = form.get(key)
groups.append(group)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def groupdel(*args, **kwargs):
name = kwargs.get('name', None)
if (name == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == name:
groups.remove(group)
break
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def lxcsettingList(*args, **kwargs):
lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'r')
lxcsetting = json.loads(lxcsettingfile.read())
lxcsettingfile.close()
return {"success": 'true', 'data':lxcsetting}
@administration_required
def chlxcsetting(*args, **kwargs):
form = kwargs['form']
lxcsetting = {}
lxcsetting['cpu'] = form['lxcCpu']
lxcsetting['memory'] = form['lxcMemory']
lxcsetting['disk'] = form['lxcDisk']
lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'w')
lxcsettingfile.write(json.dumps(lxcsetting))
lxcsettingfile.close()
return {"success": 'true'}
@administration_required
def cloud_account_query(*args, **kwargs):
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
return {"success": 'true', 'accounts':account}
@administration_required
def cloud_account_add(*args, **kwargs):
form = kwargs.get('form')
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
account.append(
{ 'cloudname' : form['cloudname'],
'username' : form['username'],
'password' : form['password'],
})
accountfile = open(fspath+"/global/sys/cloudaccount", 'w')
accountfile.write(json.dumps(account))
accountfile.close()
return {"success": 'true'}
@administration_required
def cloud_account_del(*args, **kwargs):
form = kwargs.get('form')
cloudname = form['cloudname']
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
for acc in account:
if acc['cloudname'] == cloudname:
account.remove(acc)
break
accountfile = open(fspath+"/global/sys/cloudaccount", 'w')
accountfile.write(json.dumps(account))
accountfile.close()
return {"success": 'true'}
@administration_required
def cloud_account_modify(*args, **kwargs):
form = kwargs.get('form')
cloudname = form['cloudname']
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
for acc in account:
if acc['cloudname'] == cloudname:
acc['username'] = form['username']
acc['password'] = form['password']
break
accountfile = open(fspath+"/global/sys/cloudaccount", 'w')
accountfile.write(json.dumps(account))
accountfile.close()
return {"success": "true"}
def queryForDisplay(*args, **kwargs):
'''
Usage: queryForDisplay(user = token_from_auth)
Provide the information about one user that an administrator needs
'''
if ( 'user' not in kwargs):
return {"success":'false', "reason":"Cannot get 'user'"}
user = kwargs['user']
if (user == None):
return {"success":'false', "reason":"User does not exist"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"auth_method": user.auth_method,
}
}
return result
# def usermodify(rowID, columnID, newValue, cur_user):
# '''not used now'''
# user = um.query(ID = request.form["rowID"], cur_user = root).get('token', None)
# result = um.modify(user = user, columnID = request.form["columnID"], newValue = request.form["newValue"], cur_user = root)
# return json.dumps(result)
|
the-stack_0_4815 | #!python3
"""
Python 3 wrapper for identifying objects in images
Requires DLL compilation
Both the GPU and no-GPU version should be compiled; the no-GPU version should be renamed "yolo_cpp_dll_nogpu.dll".
On a GPU system, you can force CPU evaluation by any of:
- Set global variable DARKNET_FORCE_CPU to True
- Set environment variable CUDA_VISIBLE_DEVICES to -1
- Set environment variable "FORCE_CPU" to "true"
- Set environment variable "DARKNET_PATH" to the path of the darknet lib .so (for Linux)
Directly viewing or returning bounding-boxed images requires scikit-image to be installed (`pip install scikit-image`)
Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py
Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py
@author: Philip Kahn
@date: 20180503
"""
from ctypes import *
import math
import random
import os
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int),
("uc", POINTER(c_float)),
("points", c_int),
("embeddings", POINTER(c_float)),
("embedding_size", c_int),
("sim", c_float),
("track_id", c_int)]
class DETNUMPAIR(Structure):
_fields_ = [("num", c_int),
("dets", POINTER(DETECTION))]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
def network_width(net):
return lib.network_width(net)
def network_height(net):
return lib.network_height(net)
def bbox2points(bbox):
"""
Convert a bounding box from YOLO format (center x, center y, width, height)
to the corner points used by cv2.rectangle
"""
x, y, w, h = bbox
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
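# Example: bbox = (50, 40, 20, 10) -> (xmin, ymin, xmax, ymax) = (40, 35, 60, 45)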
def class_colors(names):
"""
Create a dict with one random BGR color for each
class name
"""
return {name: (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)) for name in names}
def load_network(config_file, data_file, weights, batch_size=1):
"""
load model description and weights from config files
args:
config_file (str): path to .cfg model file
data_file (str): path to .data model file
weights (str): path to weights
returns:
network: trained model
class_names
class_colors
"""
network = load_net_custom(
config_file.encode("ascii"),
weights.encode("ascii"), 0, batch_size)
metadata = load_meta(data_file.encode("ascii"))
class_names = [metadata.names[i].decode("ascii") for i in range(metadata.classes)]
colors = class_colors(class_names)
return network, class_names, colors
def print_detections(detections):
print("\nObjects:")
for label, confidence, bbox in detections:
if label == "person":
x, y, w, h = bbox
print("{}: {}%".format(label, confidence))
def draw_boxes(detections, image, colors):
import cv2
for label, confidence, bbox in detections:
if label == "person":
left, top, right, bottom = bbox2points(bbox)
cv2.rectangle(image, (left, top), (right, bottom), colors[label], 1)
cv2.putText(image, "{} [{:.2f}]".format(label, float(confidence)),
(left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
colors[label], 2)
return image
def decode_detection(detections):
decoded = []
for label, confidence, bbox in detections:
confidence = str(round(confidence * 100, 2))
decoded.append((str(label), confidence, bbox))
return decoded
def remove_negatives(detections, class_names, num):
"""
Remove all classes with 0% confidence within the detection
"""
predictions = []
for j in range(num):
for idx, name in enumerate(class_names):
if detections[j].prob[idx] > 0:
bbox = detections[j].bbox
bbox = (bbox.x, bbox.y, bbox.w, bbox.h)
predictions.append((name, detections[j].prob[idx], (bbox)))
return predictions
def detect_image(network, class_names, image, thresh=.5, hier_thresh=.5, nms=.45):
"""
Return a list of (label, confidence, bbox) tuples for the detections, sorted by confidence
"""
pnum = pointer(c_int(0))
predict_image(network, image)
detections = get_network_boxes(network, image.w, image.h,
thresh, hier_thresh, None, 0, pnum, 0)
num = pnum[0]
if nms:
do_nms_sort(detections, num, len(class_names), nms)
predictions = remove_negatives(detections, class_names, num)
predictions = decode_detection(predictions)
free_detections(detections, num)
return sorted(predictions, key=lambda x: x[1])
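# Minimal end-to-end sketch (file names are placeholders, not shipped with this
# wrapper; note that print_detections/draw_boxes in this variant only report
# the "person" class):
#   network, class_names, colors = load_network("yolov4.cfg", "coco.data", "yolov4.weights")
#   image = load_image(b"dog.jpg", 0, 0)
#   detections = detect_image(network, class_names, image)
#   print_detections(detections)
#   free_image(image)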
# lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
hasGPU = True
if os.name == "nt":
cwd = os.path.dirname(__file__)
os.environ['PATH'] = cwd + ';' + os.environ['PATH']
winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
envKeys = list()
for k, v in os.environ.items():
envKeys.append(k)
try:
try:
tmp = os.environ["FORCE_CPU"].lower()
if tmp in ["1", "true", "yes", "on"]:
raise ValueError("ForceCPU")
else:
print("Flag value {} not forcing CPU mode".format(tmp))
except KeyError:
# We never set the flag
if 'CUDA_VISIBLE_DEVICES' in envKeys:
if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
raise ValueError("ForceCPU")
try:
global DARKNET_FORCE_CPU
if DARKNET_FORCE_CPU:
raise ValueError("ForceCPU")
except NameError as cpu_error:
print(cpu_error)
if not os.path.exists(winGPUdll):
raise ValueError("NoDLL")
lib = CDLL(winGPUdll, RTLD_GLOBAL)
except (KeyError, ValueError):
hasGPU = False
if os.path.exists(winNoGPUdll):
lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
print("Notice: CPU-only mode")
else:
# Try the other way, in case the no-GPU DLL was compiled but not renamed
lib = CDLL(winGPUdll, RTLD_GLOBAL)
print("Environment variables indicated a CPU run, but we didn't find {}. Trying a GPU run anyway.".format(winNoGPUdll))
else:
lib = CDLL(os.path.join(
os.environ.get('DARKNET_PATH', './'),
"libdarknet.so"), RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]
predict = lib.network_predict_ptr
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
init_cpu = lib.init_cpu
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_batch_detections = lib.free_batch_detections
free_batch_detections.argtypes = [POINTER(DETNUMPAIR), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict_ptr
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
free_network_ptr = lib.free_network_ptr
free_network_ptr.argtypes = [c_void_p]
free_network_ptr.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
predict_image_letterbox = lib.network_predict_image_letterbox
predict_image_letterbox.argtypes = [c_void_p, IMAGE]
predict_image_letterbox.restype = POINTER(c_float)
network_predict_batch = lib.network_predict_batch
network_predict_batch.argtypes = [c_void_p, IMAGE, c_int, c_int, c_int,
c_float, c_float, POINTER(c_int), c_int, c_int]
network_predict_batch.restype = POINTER(DETNUMPAIR)
|
the-stack_0_4816 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <[email protected]>
# Susi Lehtola <[email protected]>
'''
XC functional, the interface to libxc
(http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)
'''
import sys
import warnings
import copy
import ctypes
import math
import numpy
from pyscf import lib
from pyscf.dft.xc.utils import remove_dup, format_xc_code
from pyscf import __config__
_itrf = lib.load_library('libxc_itrf')
_itrf.LIBXC_is_lda.restype = ctypes.c_int
_itrf.LIBXC_is_gga.restype = ctypes.c_int
_itrf.LIBXC_is_meta_gga.restype = ctypes.c_int
_itrf.LIBXC_needs_laplacian.restype = ctypes.c_int
_itrf.LIBXC_needs_laplacian.argtypes = [ctypes.c_int]
_itrf.LIBXC_is_hybrid.restype = ctypes.c_int
_itrf.LIBXC_is_cam_rsh.restype = ctypes.c_int
_itrf.LIBXC_max_deriv_order.restype = ctypes.c_int
_itrf.LIBXC_number_of_functionals.restype = ctypes.c_int
_itrf.LIBXC_functional_numbers.argtypes = (numpy.ctypeslib.ndpointer(dtype=numpy.intc, ndim=1, flags=("W", "C", "A")), )
_itrf.LIBXC_functional_name.argtypes = [ctypes.c_int]
_itrf.LIBXC_functional_name.restype = ctypes.c_char_p
_itrf.LIBXC_hybrid_coeff.argtypes = [ctypes.c_int]
_itrf.LIBXC_hybrid_coeff.restype = ctypes.c_double
_itrf.LIBXC_nlc_coeff.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_double)]
_itrf.LIBXC_rsh_coeff.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_double)]
_itrf.LIBXC_version.restype = ctypes.c_char_p
_itrf.LIBXC_reference.restype = ctypes.c_char_p
_itrf.LIBXC_reference_doi.restype = ctypes.c_char_p
_itrf.LIBXC_xc_reference.argtypes = [ctypes.c_int, (ctypes.c_char_p * 8)]
def libxc_version():
'''Returns the version of libxc'''
return _itrf.LIBXC_version().decode("UTF-8")
def libxc_reference():
'''Returns the reference to libxc'''
return _itrf.LIBXC_reference().decode("UTF-8")
def libxc_reference_doi():
'''Returns the reference DOI of libxc'''
return _itrf.LIBXC_reference_doi().decode("UTF-8")
__version__ = libxc_version()
__reference__ = libxc_reference()
__reference_doi__ = libxc_reference_doi()
# Runtime detection of available functionals
dynamic_func = getattr(__config__, 'dft_libxc_dynamic', False)
if dynamic_func:
def available_libxc_functionals():
# Number of functionals available in the linked libxc
nfunc = _itrf.LIBXC_number_of_functionals()
# Get functional numbers
numbers = numpy.zeros(nfunc, dtype=numpy.intc)
_itrf.LIBXC_functional_numbers(numbers)
# Return a {functional name: functional ID} mapping
return {_itrf.LIBXC_functional_name(x).decode("UTF-8").upper() : x for x in numbers}
XC = XC_CODES = available_libxc_functionals()
PROBLEMATIC_XC = dict([])
else:
# XC dict is generated by
#import pylibxc
#for xcname in pylibxc.util.xc_available_functional_names():
# f = pylibxc.LibXCFunctional(xcname, 1)
# f_id = f.get_number()
# ref = f.get_references()
# key = f"'{xcname.upper()}'"
# print(f"{key:<31s}: {f_id:<3d}, # {ref[0]}")
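# Illustrative lookups into the mapping below (IDs as listed there):
#   XC_CODES['LDA_X']     -> 1
#   XC_CODES['GGA_X_PBE'] -> 101
#   XC_CODES['GGA_C_PBE'] -> 130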
XC = XC_CODES = {
'LDA_C_1D_CSC' : 18 , # M. Casula, S. Sorella, and G. Senatore, Phys. Rev. B 74, 245427 (2006)
'LDA_C_1D_LOOS' : 26 , # P.-F. Loos, J. Chem. Phys. 138, 064108 (2013)
'LDA_C_2D_AMGB' : 15 , # C. Attaccalite, S. Moroni, P. Gori-Giorgi, and G. B. Bachelet, Phys. Rev. Lett. 88, 256601 (2002)
'LDA_C_2D_PRM' : 16 , # S. Pittalis, E. Rasanen, and M. A. L. Marques, Phys. Rev. B 78, 195322 (2008)
'LDA_C_BR78' : 552, # G. B. Jr. and S. M. Rothstein, J. Chem. Phys. 69, 1177 (1978)
'LDA_C_CHACHIYO' : 287, # T. Chachiyo, J. Chem. Phys. 145, 021101 (2016)
'LDA_C_CHACHIYO_MOD' : 307, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'LDA_C_GK72' : 578, # R. G. Gordon and Y. S. Kim, J. Chem. Phys. 56, 3122 (1972)
'LDA_C_GL' : 5 , # O. Gunnarsson and B. I. Lundqvist, Phys. Rev. B 13, 4274 (1976)
'LDA_C_GOMBAS' : 24 , # P. Gombas, Fortschr. Phys. 13, 137 (1965)
'LDA_C_HL' : 4 , # L. Hedin and B. I. Lundqvist, J. Phys. C: Solid State Phys. 4, 2064 (1971)
'LDA_C_KARASIEV' : 579, # V. V. Karasiev, J. Chem. Phys. 145, 157101 (2016)
'LDA_C_KARASIEV_MOD' : 308, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'LDA_C_LP96' : 289, # S. Liu and R. G. Parr, Phys. Rev. A 53, 2211 (1996)
'LDA_C_MCWEENY' : 551, # R. McWeeny, in The New World of Quantum Chemistry, edited by B. Pullman and R. Parr (Reidel, Boston, 1976) pp. 3--31
'LDA_C_ML1' : 22 , # E. I. Proynov and D. R. Salahub, Phys. Rev. B 49, 7874 (1994)
'LDA_C_ML2' : 23 , # E. I. Proynov and D. R. Salahub, Phys. Rev. B 49, 7874 (1994)
'LDA_C_OB_PW' : 14 , # G. Ortiz and P. Ballone, Phys. Rev. B 50, 1391 (1994)
'LDA_C_OB_PZ' : 11 , # G. Ortiz and P. Ballone, Phys. Rev. B 50, 1391 (1994)
'LDA_C_OW' : 574, # P. A. Stewart and P. M. W. Gill, J. Chem. Soc., Faraday Trans. 91, 4337 (1995)
'LDA_C_OW_LYP' : 573, # P. A. Stewart and P. M. W. Gill, J. Chem. Soc., Faraday Trans. 91, 4337 (1995)
'LDA_C_PK09' : 554, # E. Proynov and J. Kong, Phys. Rev. A 79, 014103 (2009)
'LDA_C_PMGB06' : 590, # S. Paziani, S. Moroni, P. Gori-Giorgi, and G. B. Bachelet, Phys. Rev. B 73, 155111 (2006)
'LDA_C_PW' : 12 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992)
'LDA_C_PW_MOD' : 13 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992), added extra digits to some constants as in the PBE routine (http://dft.rutgers.edu/pubs/PBE.asc)
'LDA_C_PW_RPA' : 25 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992)
'LDA_C_PZ' : 9 , # J. P. Perdew and A. Zunger, Phys. Rev. B 23, 5048 (1981)
'LDA_C_PZ_MOD' : 10 , # J. P. Perdew and A. Zunger, Phys. Rev. B 23, 5048 (1981), modified to improve the matching between the low- and high-rs parts
'LDA_C_RC04' : 27 , # S. Ragot and P. Cortona, J. Chem. Phys. 121, 7671 (2004)
'LDA_C_RPA' : 3 , # M. Gell-Mann and K. A. Brueckner, Phys. Rev. 106, 364 (1957)
'LDA_C_RPW92' : 684, # M. Ruggeri, P. L. Rios, and A. Alavi, Phys. Rev. B 98, 161105 (2018)
'LDA_C_UPW92' : 683, # M. Ruggeri, P. L. Rios, and A. Alavi, Phys. Rev. B 98, 161105 (2018)
'LDA_C_VBH' : 17 , # U. von Barth and L. Hedin, J. Phys. C: Solid State Phys. 5, 1629 (1972)
'LDA_C_VWN' : 7 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_1' : 28 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_2' : 29 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_3' : 30 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_4' : 31 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_RPA' : 8 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_WIGNER' : 2 , # E. Wigner, Trans. Faraday Soc. 34, 678 (1938)
'LDA_C_XALPHA' : 6 , # J. C. Slater, Phys. Rev. 81, 385 (1951)
'LDA_K_GDS08_WORKER' : 100001, # L. M. Ghiringhelli and L. Delle Site, Phys. Rev. B 77, 073104 (2008)
'LDA_K_LP' : 51 , # C. Lee and R. G. Parr, Phys. Rev. A 35, 2377 (1987)
'LDA_K_LP96' : 580, # S. Liu and R. G. Parr, Phys. Rev. A 53, 2211 (1996)
'LDA_K_TF' : 50 , # L. H. Thomas, Math. Proc. Cambridge Philos. Soc. 23, 542 (1927)
'LDA_K_ZLP' : 550, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'LDA_X' : 1 , # P. A. M. Dirac, Math. Proc. Cambridge Philos. Soc. 26, 376 (1930)
'LDA_X_1D_EXPONENTIAL' : 600, # N. Helbig, J. I. Fuks, M. Casula, M. J. Verstraete, M. A. L. Marques, I. V. Tokatly, and A. Rubio, Phys. Rev. A 83, 032503 (2011)
'LDA_X_1D_SOFT' : 21 , # N. Helbig, J. I. Fuks, M. Casula, M. J. Verstraete, M. A. L. Marques, I. V. Tokatly, and A. Rubio, Phys. Rev. A 83, 032503 (2011)
'LDA_X_2D' : 19 , # P. A. M. Dirac, Math. Proc. Cambridge Philos. Soc. 26, 376 (1930)
'LDA_X_ERF' : 546, # P. M. W. Gill, R. D. Adamson, and J. A. Pople, Mol. Phys. 88, 1005 (1996)
'LDA_X_RAE' : 549, # A. Rae, Chem. Phys. Lett. 18, 574 (1973)
'LDA_X_REL' : 532, # A. K. Rajagopal, J. Phys. C: Solid State Phys. 11, L943 (1978)
'LDA_X_SLOC' : 692, # K. Finzel and A. I. Baranov, Int. J. Quantum Chem. 117, 40 (2017)
'LDA_XC_1D_EHWLRG_1' : 536, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_1D_EHWLRG_2' : 537, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_1D_EHWLRG_3' : 538, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_BN05' : 588, # R. Baer and D. Neuhauser, Phys. Rev. Lett. 94, 043002 (2005)
'LDA_XC_GDSMFB' : 577, # S. Groth, T. Dornheim, T. Sjostrom, F. D. Malone, W. M. C. Foulkes, and M. Bonitz, Phys. Rev. Lett. 119, 135001 (2017)
'LDA_XC_KSDT' : 259, # V. V. Karasiev, T. Sjostrom, J. Dufty, and S. B. Trickey, Phys. Rev. Lett. 112, 076403 (2014)
'LDA_XC_LP_A' : 547, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'LDA_XC_LP_B' : 548, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'LDA_XC_TETER93' : 20 , # S. Goedecker, M. Teter, and J. Hutter, Phys. Rev. B 54, 1703 (1996)
'LDA_XC_TIH' : 599, # D. J. Tozer, V. E. Ingamells, and N. C. Handy, J. Chem. Phys. 105, 9200 (1996)
'LDA_XC_ZLP' : 43 , # Q. Zhao, M. Levy, and R. G. Parr, Phys. Rev. A 47, 918 (1993)
'HYB_LDA_XC_CAM_LDA0' : 178, # M. A. Mosquera, C. H. Borca, M. A. Ratner, and G. C. Schatz, J. Phys. Chem. A 120, 1605 (2016)
'HYB_LDA_XC_LDA0' : 177, # P. Rinke, A. Schleife, E. Kioupakis, A. Janotti, C. Rodl, F. Bechstedt, M. Scheffler, and C. G. Van de Walle, Phys. Rev. Lett. 108, 126404 (2012)
'GGA_C_ACGGA' : 39 , # A. Cancio, G. P. Chen, B. T. Krull, and K. Burke, J. Chem. Phys. 149, 084116 (2018)
'GGA_C_ACGGAP' : 176, # A. Cancio, G. P. Chen, B. T. Krull, and K. Burke, J. Chem. Phys. 149, 084116 (2018)
'GGA_C_AM05' : 135, # R. Armiento and A. E. Mattsson, Phys. Rev. B 72, 085108 (2005)
'GGA_C_APBE' : 186, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_C_BMK' : 280, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'GGA_C_CCDF' : 313, # J. T. Margraf, C. Kunkel, and K. Reuter, J. Chem. Phys. 150, 244116 (2019)
'GGA_C_CHACHIYO' : 309, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'GGA_C_CS1' : 565, # N. C. Handy and A. J. Cohen, J. Chem. Phys. 116, 5411 (2002)
'GGA_C_FT97' : 88 , # M. Filatov and W. Thiel, Int. J. Quantum Chem. 62, 603 (1997)
'GGA_C_GAM' : 33 , # H. S. Yu, W. Zhang, P. Verma, X. He, and D. G. Truhlar, Phys. Chem. Chem. Phys. 17, 12146 (2015)
'GGA_C_GAPC' : 555, # E. Fabiano, P. E. Trevisanutto, A. Terentjevs, and L. A. Constantin, J. Chem. Theory Comput. 10, 2016 (2014)
'GGA_C_GAPLOC' : 556, # E. Fabiano, P. E. Trevisanutto, A. Terentjevs, and L. A. Constantin, J. Chem. Theory Comput. 10, 2016 (2014)
'GGA_C_HCTH_A' : 97 , # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_C_HYB_TAU_HCTH' : 283, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'GGA_C_LM' : 137, # D. C. Langreth and M. J. Mehl, Phys. Rev. Lett. 47, 446 (1981)
'GGA_C_LYP' : 131, # C. Lee, W. Yang, and R. G. Parr, Phys. Rev. B 37, 785 (1988)
'GGA_C_MGGAC' : 712, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 155140 (2019)
'GGA_C_N12' : 80 , # R. Peverati and D. G. Truhlar, J. Chem. Theory Comput. 8, 2310 (2012)
'GGA_C_N12_SX' : 79 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'GGA_C_OP_B88' : 87 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_G96' : 85 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_PBE' : 86 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_PW91' : 262, # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_XALPHA' : 84 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OPTC' : 200, # A. J. Cohen and N. C. Handy, Mol. Phys. 99, 607 (2001)
'GGA_C_P86' : 132, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86_FT' : 217, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86VWN' : 252, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86VWN_FT' : 253, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_PBE' : 130, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_C_PBE_JRGX' : 138, # L. S. Pedroza, A. J. R. da Silva, and K. Capelle, Phys. Rev. B 79, 201106 (2009)
'GGA_C_PBE_MOL' : 272, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'GGA_C_PBE_SOL' : 133, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, O. A. Vydrov, G. E. Scuseria, L. A. Constantin, X. Zhou, and K. Burke, Phys. Rev. Lett. 100, 136406 (2008)
'GGA_C_PBE_VWN' : 216, # E. Kraisler, G. Makov, and I. Kelson, Phys. Rev. A 82, 042516 (2010)
'GGA_C_PBEFE' : 258, # R. Sarmiento-Perez, S. Botti, and M. A. L. Marques, J. Chem. Theory Comput. 11, 3844 (2015)
'GGA_C_PBEINT' : 62 , # E. Fabiano, L. A. Constantin, and F. Della Sala, Phys. Rev. B 82, 113104 (2010)
'GGA_C_PBELOC' : 246, # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 86, 035130 (2012)
'GGA_C_PW91' : 134, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_C_Q2D' : 47 , # L. Chiodo, L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. Lett. 108, 126402 (2012)
'GGA_C_REGTPSS' : 83 , # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'GGA_C_REVTCA' : 99 , # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 460, 536 (2008)
'GGA_C_RGE2' : 143, # A. Ruzsinszky, G. I. Csonka, and G. E. Scuseria, J. Chem. Theory Comput. 5, 763 (2009)
'GGA_C_SCAN_E0' : 553, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'GGA_C_SG4' : 534, # L. A. Constantin, A. Terentjevs, F. Della Sala, P. Cortona, and E. Fabiano, Phys. Rev. B 93, 045126 (2016)
'GGA_C_SOGGA11' : 152, # R. Peverati, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. Lett. 2, 1991 (2011)
'GGA_C_SOGGA11_X' : 159, # R. Peverati and D. G. Truhlar, J. Chem. Phys. 135, 191102 (2011)
'GGA_C_SPBE' : 89 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_C_TAU_HCTH' : 281, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'GGA_C_TCA' : 100, # V. Tognetti, P. Cortona, and C. Adamo, J. Chem. Phys. 128, 034101 (2008)
'GGA_C_TM_LYP' : 559, # A. J. Thakkar and S. P. McCarthy, J. Chem. Phys. 131, 134109 (2009)
'GGA_C_TM_PBE' : 560, # A. J. Thakkar and S. P. McCarthy, J. Chem. Phys. 131, 134109 (2009)
'GGA_C_W94' : 561, # L. C. Wilson, Chem. Phys. 181, 337 (1994)
'GGA_C_WI' : 148, # L. C. Wilson and S. Ivanov, Int. J. Quantum Chem. 69, 523 (1998)
'GGA_C_WI0' : 153, # L. C. Wilson and S. Ivanov, Int. J. Quantum Chem. 69, 523 (1998)
'GGA_C_WL' : 147, # L. C. Wilson and M. Levy, Phys. Rev. B 41, 12930 (1990)
'GGA_C_XPBE' : 136, # X. Xu and W. A. Goddard, J. Chem. Phys. 121, 4068 (2004)
'GGA_C_ZPBEINT' : 61 , # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 84, 233103 (2011)
'GGA_C_ZPBESOL' : 63 , # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 84, 233103 (2011)
'GGA_C_ZVPBEINT' : 557, # L. A. Constantin, E. Fabiano, and F. D. Sala, J. Chem. Phys. 137, 194105 (2012)
'GGA_C_ZVPBELOC' : 606, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'GGA_C_ZVPBESOL' : 558, # L. A. Constantin, E. Fabiano, and F. D. Sala, J. Chem. Phys. 137, 194105 (2012)
'GGA_K_ABSP1' : 506, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP2' : 507, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP3' : 277, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP4' : 278, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_APBE' : 185, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_K_APBEINT' : 54 , # S. Laricchia, E. Fabiano, L. A. Constantin, and F. Della Sala, J. Chem. Theory Comput. 7, 2439 (2011)
'GGA_K_BALTIN' : 504, # R. Baltin, Z. Naturforsch. A 27, 1176 (1972)
'GGA_K_DK' : 516, # A. E. DePristo and J. D. Kress, Phys. Rev. A 35, 438 (1987)
'GGA_K_ERNZERHOF' : 520, # M. Ernzerhof, J. Mol. Struct.: THEOCHEM 501--502, 59 (2000)
'GGA_K_EXP4' : 597, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_FR_B88' : 514, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_K_FR_PW86' : 515, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_K_GDS08' : 591, # L. M. Ghiringhelli and L. Delle Site, Phys. Rev. B 77, 073104 (2008)
'GGA_K_GE2' : 501, # A. S. Kompaneets and E. S. Pavlovskii, Zh. Eksp. Teor. Fiz. 31, 427 (1956), [J. Exp. Theor. Phys. 4, 328 (1957)]
'GGA_K_GHDS10' : 592, # L. M. Ghiringhelli, I. P. Hamilton, and L. D. Site, J. Chem. Phys. 132, 014106 (2010)
'GGA_K_GHDS10R' : 593, # S. B. Trickey, V. V. Karasiev, and A. Vela, Phys. Rev. B 84, 075146 (2011)
'GGA_K_GOLDEN' : 502, # S. Golden, Phys. Rev. 105, 604 (1957)
'GGA_K_GP85' : 510, # S. K. Ghosh and R. G. Parr, J. Chem. Phys. 82, 3307 (1985)
'GGA_K_GR' : 508, # J. L. Gazquez and J. Robles, J. Chem. Phys. 76, 1467 (1982)
'GGA_K_LC94' : 521, # A. Lembarki and H. Chermette, Phys. Rev. A 50, 5328 (1994)
'GGA_K_LGAP' : 620, # L. A. Constantin, E. Fabiano, S. Smiga, and F. Della Sala, Phys. Rev. B 95, 115153 (2017)
'GGA_K_LGAP_GE' : 633, # L. A. Constantin, E. Fabiano, S. Smiga, and F. Della Sala, Phys. Rev. B 95, 115153 (2017)
'GGA_K_LIEB' : 505, # E. H. Lieb, Rev. Mod. Phys. 53, 603 (1981)
'GGA_K_LKT' : 613, # K. Luo, V. V. Karasiev, and S. B. Trickey, Phys. Rev. B 98, 041111 (2018)
'GGA_K_LLP' : 522, # H. Lee, C. Lee, and R. G. Parr, Phys. Rev. A 44, 768 (1991)
'GGA_K_LUDENA' : 509, # E. V. Ludena, in Cond. Matt. Theor., Vol. 1, edited by F. B. Malik (Plenum, New York, 1986) p. 183
'GGA_K_MEYER' : 57 , # A. Meyer, G. C. Wang, and W. H. Young, Z. Naturforsch. A 31, 898 (1976)
'GGA_K_OL1' : 512, # H. Ou-Yang and M. Levy, Int. J. Quantum Chem. 40, 379 (1991)
'GGA_K_OL2' : 513, # H. Ou-Yang and M. Levy, Int. J. Quantum Chem. 40, 379 (1991)
'GGA_K_PBE2' : 616, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PBE3' : 595, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PBE4' : 596, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PEARSON' : 511, # D. J. Lacks and R. G. Gordon, J. Chem. Phys. 100, 4446 (1994)
'GGA_K_PERDEW' : 517, # J. P. Perdew, Phys. Lett. A 165, 79 (1992)
'GGA_K_PG1' : 219, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Phys. Chem. Lett. 9, 4385 (2018), pMID: 30019904
'GGA_K_RATIONAL_P' : 218, # J. Lehtomaki and O. Lopez-Acevedo, Phys. Rev. B 100, 165111 (2019)
'GGA_K_REVAPBE' : 55 , # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_K_REVAPBEINT' : 53 , # S. Laricchia, E. Fabiano, L. A. Constantin, and F. Della Sala, J. Chem. Theory Comput. 7, 2439 (2011)
'GGA_K_TFVW' : 52 , # C. F. von Weizsacker, Z. Phys. 96, 431 (1935)
'GGA_K_TFVW_OPT' : 635, # L. A. Espinosa Leal, A. Karpenko, M. A. Caro, and O. Lopez-Acevedo, Phys. Chem. Chem. Phys. 17, 31463 (2015)
'GGA_K_THAKKAR' : 523, # A. J. Thakkar, Phys. Rev. A 46, 6920 (1992)
'GGA_K_TKVLN' : 594, # S. B. Trickey, V. V. Karasiev, and A. Vela, Phys. Rev. B 84, 075146 (2011)
'GGA_K_TW1' : 187, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW2' : 188, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW3' : 189, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW4' : 190, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_VJKS' : 519, # L. Vitos, B. Johansson, J. Kollar, and H. L. Skriver, Phys. Rev. A 61, 052511 (2000)
'GGA_K_VSK' : 518, # L. Vitos, H. L. Skriver, and J. Kollar, Phys. Rev. B 57, 12611 (1998)
'GGA_K_VT84F' : 619, # V. V. Karasiev, D. Chakraborty, O. A. Shukruto, and S. B. Trickey, Phys. Rev. B 88, 161108 (2013)
'GGA_K_VW' : 500, # C. F. von Weizsacker, Z. Phys. 96, 431 (1935)
'GGA_K_YT65' : 503, # K. Yonei and Y. Tomishima, J. Phys. Soc. Jpn. 20, 1051 (1965)
'GGA_X_2D_B86' : 128, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_2D_B86_MGC' : 124, # S. Pittalis, E. Rasanen, J. G. Vilhena, and M. A. L. Marques, Phys. Rev. A 79, 012503 (2009)
'GGA_X_2D_B88' : 127, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_2D_PBE' : 129, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_AIRY' : 192, # L. A. Constantin, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. B 80, 035125 (2009)
'GGA_X_AK13' : 56 , # R. Armiento and S. Kummel, Phys. Rev. Lett. 111, 036402 (2013)
'GGA_X_AM05' : 120, # R. Armiento and A. E. Mattsson, Phys. Rev. B 72, 085108 (2005)
'GGA_X_APBE' : 184, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_X_B86' : 103, # A. D. Becke, J. Chem. Phys. 84, 4524 (1986)
'GGA_X_B86_MGC' : 105, # A. D. Becke, J. Chem. Phys. 84, 4524 (1986)
'GGA_X_B86_R' : 41 , # I. Hamada, Phys. Rev. B 89, 121103 (2014)
'GGA_X_B88' : 106, # A. D. Becke, Phys. Rev. A 38, 3098 (1988)
'GGA_X_B88_6311G' : 179, # J. M. Ugalde, C. Sarasola, and M. Aguado, J. Phys. B: At., Mol. Opt. Phys. 27, 423 (1994)
'GGA_X_B88M' : 570, # E. Proynov, H. Chermette, and D. R. Salahub, J. Chem. Phys. 113, 10013 (2000)
'GGA_X_BAYESIAN' : 125, # J. J. Mortensen, K. Kaasbjerg, S. L. Frederiksen, J. K. Norskov, J. P. Sethna, and K. W. Jacobsen, Phys. Rev. Lett. 95, 216401 (2005)
'GGA_X_BCGP' : 38 , # K. Burke, A. Cancio, T. Gould, and S. Pittalis, ArXiv e-prints (2014), arXiv:1409.4834 [cond-mat.mtrl-sci]
'GGA_X_BEEFVDW' : 285, # J. Wellendorff, K. T. Lundgaard, A. Mogelhoj, V. Petzold, D. D. Landis, J. K. Norskov, T. Bligaard, and K. W. Jacobsen, Phys. Rev. B 85, 235149 (2012)
'GGA_X_BPCCAC' : 98 , # E. Bremond, D. Pilard, I. Ciofini, H. Chermette, C. Adamo, and P. Cortona, Theor. Chem. Acc. 131, 1184 (2012)
'GGA_X_C09X' : 158, # V. R. Cooper, Phys. Rev. B 81, 161104 (2010)
'GGA_X_CAP' : 270, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Phys. 142, 054105 (2015)
'GGA_X_CHACHIYO' : 298, # T. Chachiyo and H. Chachiyo, Molecules 25, 3485 (2020)
'GGA_X_DK87_R1' : 111, # A. E. DePristo and J. D. Kress, J. Chem. Phys. 86, 1425 (1987)
'GGA_X_DK87_R2' : 112, # A. E. DePristo and J. D. Kress, J. Chem. Phys. 86, 1425 (1987)
'GGA_X_EB88' : 271, # P. Elliott and K. Burke, Can. J. Chem. 87, 1485 (2009)
'GGA_X_ECMV92' : 215, # E. Engel, J. A. Chevary, L. D. Macdonald, and S. H. Vosko, Z. Phys. D: At., Mol. Clusters 23, 7 (1992)
'GGA_X_EV93' : 35 , # E. Engel and S. H. Vosko, Phys. Rev. B 47, 13164 (1993)
'GGA_X_FD_LB94' : 604, # A. P. Gaiduk and V. N. Staroverov, Phys. Rev. A 83, 012509 (2011)
'GGA_X_FD_REVLB94' : 605, # A. P. Gaiduk and V. N. Staroverov, Phys. Rev. A 83, 012509 (2011)
'GGA_X_FT97_A' : 114, # M. Filatov and W. Thiel, Mol. Phys. 91, 847 (1997)
'GGA_X_FT97_B' : 115, # M. Filatov and W. Thiel, Mol. Phys. 91, 847 (1997)
'GGA_X_G96' : 107, # P. M. W. Gill, Mol. Phys. 89, 433 (1996)
'GGA_X_GAM' : 32 , # H. S. Yu, W. Zhang, P. Verma, X. He, and D. G. Truhlar, Phys. Chem. Chem. Phys. 17, 12146 (2015)
'GGA_X_GG99' : 535, # A. T. Gilbert and P. M. Gill, Chem. Phys. Lett. 312, 511 (1999)
'GGA_X_HCTH_A' : 34 , # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_X_HERMAN' : 104, # F. Herman, J. P. V. Dyke, and I. B. Ortenburger, Phys. Rev. Lett. 22, 807 (1969)
'GGA_X_HJS_B88' : 527, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_B88_V2' : 46 , # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'GGA_X_HJS_B97X' : 528, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_PBE' : 525, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_PBE_SOL' : 526, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HTBS' : 191, # P. Haas, F. Tran, P. Blaha, and K. Schwarz, Phys. Rev. B 83, 205117 (2011)
'GGA_X_ITYH' : 529, # H. Iikura, T. Tsuneda, T. Yanai, and K. Hirao, J. Chem. Phys. 115, 3540 (2001)
'GGA_X_ITYH_OPTX' : 622, # N. C. Handy and A. J. Cohen, Mol. Phys. 99, 403 (2001)
'GGA_X_ITYH_PBE' : 623, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_X_KGG99' : 544, # A. T. Gilbert and P. M. Gill, Chem. Phys. Lett. 312, 511 (1999)
'GGA_X_KT1' : 145, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_X_LAG' : 193, # L. Vitos, B. Johansson, J. Kollar, and H. L. Skriver, Phys. Rev. B 62, 10046 (2000)
'GGA_X_LAMBDA_CH_N' : 44 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LAMBDA_LO_N' : 45 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LAMBDA_OC2_N' : 40 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LB' : 160, # R. van Leeuwen and E. J. Baerends, Phys. Rev. A 49, 2421 (1994)
'GGA_X_LBM' : 182, # P. R. T. Schipper, O. V. Gritsenko, S. J. A. van Gisbergen, and E. J. Baerends, J. Chem. Phys. 112, 1344 (2000)
'GGA_X_LG93' : 113, # D. J. Lacks and R. G. Gordon, Phys. Rev. A 47, 4681 (1993)
'GGA_X_LSPBE' : 168, # J. C. Pacheco-Kato, J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 651, 268 (2016)
'GGA_X_LSRPBE' : 169, # J. C. Pacheco-Kato, J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 651, 268 (2016)
'GGA_X_LV_RPW86' : 58 , # K. Berland and P. Hyldgaard, Phys. Rev. B 89, 035412 (2014)
'GGA_X_MB88' : 149, # V. Tognetti and C. Adamo, J. Phys. Chem. A 113, 14415 (2009)
'GGA_X_MPBE' : 122, # C. Adamo and V. Barone, J. Chem. Phys. 116, 5933 (2002)
'GGA_X_MPW91' : 119, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'GGA_X_N12' : 82 , # R. Peverati and D. G. Truhlar, J. Chem. Theory Comput. 8, 2310 (2012)
'GGA_X_NCAP' : 180, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Theory Comput. 15, 303 (2019)
'GGA_X_OL2' : 183, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_X_OPTB86B_VDW' : 171, # J. Klimes, D. R. Bowler, and A. Michaelides, Phys. Rev. B 83, 195131 (2011)
'GGA_X_OPTB88_VDW' : 139, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_OPTPBE_VDW' : 141, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_OPTX' : 110, # N. C. Handy and A. J. Cohen, Mol. Phys. 99, 403 (2001)
'GGA_X_PBE' : 101, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_X_PBE_JSJR' : 126, # L. S. Pedroza, A. J. R. da Silva, and K. Capelle, Phys. Rev. B 79, 201106 (2009)
'GGA_X_PBE_MOL' : 49 , # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'GGA_X_PBE_R' : 102, # Y. Zhang and W. Yang, Phys. Rev. Lett. 80, 890 (1998)
'GGA_X_PBE_SOL' : 116, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, O. A. Vydrov, G. E. Scuseria, L. A. Constantin, X. Zhou, and K. Burke, Phys. Rev. Lett. 100, 136406 (2008)
'GGA_X_PBE_TCA' : 59 , # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 460, 536 (2008)
'GGA_X_PBEA' : 121, # G. K. H. Madsen, Phys. Rev. B 75, 195108 (2007)
'GGA_X_PBEFE' : 265, # R. Sarmiento-Perez, S. Botti, and M. A. L. Marques, J. Chem. Theory Comput. 11, 3844 (2015)
'GGA_X_PBEINT' : 60 , # E. Fabiano, L. A. Constantin, and F. Della Sala, Phys. Rev. B 82, 113104 (2010)
'GGA_X_PBEK1_VDW' : 140, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_PBEPOW' : 539, # Eric Bremond, J. Chem. Phys. 145, 244102 (2016)
'GGA_X_PBETRANS' : 291, # Eric Bremond, I. Ciofini, and C. Adamo, Mol. Phys. 114, 1059 (2016)
'GGA_X_PW86' : 108, # J. P. Perdew and W. Yue, Phys. Rev. B 33, 8800 (1986)
'GGA_X_PW91' : 109, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_X_PW91_MOD' : 316, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_X_Q2D' : 48 , # L. Chiodo, L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. Lett. 108, 126402 (2012)
'GGA_X_REVSSB_D' : 312, # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Comput. Chem. 32, 1117 (2011)
'GGA_X_RGE2' : 142, # A. Ruzsinszky, G. I. Csonka, and G. E. Scuseria, J. Chem. Theory Comput. 5, 763 (2009)
'GGA_X_RPBE' : 117, # B. Hammer, L. B. Hansen, and J. K. Norskov, Phys. Rev. B 59, 7413 (1999)
'GGA_X_RPW86' : 144, # E. D. Murray, K. Lee, and D. C. Langreth, J. Chem. Theory Comput. 5, 2754 (2009)
'GGA_X_S12G' : 495, # M. Swart, Chem. Phys. Lett. 580, 166 (2013)
'GGA_X_SFAT' : 530, # A. Savin and H.-J. Flad, Int. J. Quantum Chem. 56, 327 (1995)
'GGA_X_SFAT_PBE' : 601, # A. Savin and H.-J. Flad, Int. J. Quantum Chem. 56, 327 (1995)
'GGA_X_SG4' : 533, # L. A. Constantin, A. Terentjevs, F. Della Sala, P. Cortona, and E. Fabiano, Phys. Rev. B 93, 045126 (2016)
'GGA_X_SOGGA' : 150, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 128, 184109 (2008)
'GGA_X_SOGGA11' : 151, # R. Peverati, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. Lett. 2, 1991 (2011)
'GGA_X_SSB' : 91 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_X_SSB_D' : 92 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_X_SSB_SW' : 90 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Comput. Methods Sci. Eng. 9, 69 (2009)
'GGA_X_VMT84_GE' : 68 , # A. Vela, J. C. Pacheco-Kato, J. L. Gazquez, J. M. del Campo, and S. B. Trickey, J. Chem. Phys. 136, 144115 (2012)
'GGA_X_VMT84_PBE' : 69 , # A. Vela, J. C. Pacheco-Kato, J. L. Gazquez, J. M. del Campo, and S. B. Trickey, J. Chem. Phys. 136, 144115 (2012)
'GGA_X_VMT_GE' : 70 , # A. Vela, V. Medel, and S. B. Trickey, J. Chem. Phys. 130, 244103 (2009)
'GGA_X_VMT_PBE' : 71 , # A. Vela, V. Medel, and S. B. Trickey, J. Chem. Phys. 130, 244103 (2009)
'GGA_X_WC' : 118, # Z. Wu and R. E. Cohen, Phys. Rev. B 73, 235116 (2006)
'GGA_X_WPBEH' : 524, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'GGA_X_XPBE' : 123, # X. Xu and W. A. Goddard, J. Chem. Phys. 121, 4068 (2004)
'GGA_XC_B97_D' : 170, # S. Grimme, J. Comput. Chem. 27, 1787 (2006)
'GGA_XC_B97_GGA1' : 96 , # A. J. Cohen and N. C. Handy, Chem. Phys. Lett. 316, 160 (2000)
'GGA_XC_BEEFVDW' : 286, # J. Wellendorff, K. T. Lundgaard, A. Mogelhoj, V. Petzold, D. D. Landis, J. K. Norskov, T. Bligaard, and K. W. Jacobsen, Phys. Rev. B 85, 235149 (2012)
'GGA_XC_EDF1' : 165, # R. D. Adamson, P. M. W. Gill, and J. A. Pople, Chem. Phys. Lett. 284, 6 (1998)
'GGA_XC_HCTH_120' : 162, # A. D. Boese, N. L. Doltsinis, N. C. Handy, and M. Sprik, J. Chem. Phys. 112, 1670 (2000)
'GGA_XC_HCTH_147' : 163, # A. D. Boese, N. L. Doltsinis, N. C. Handy, and M. Sprik, J. Chem. Phys. 112, 1670 (2000)
'GGA_XC_HCTH_407' : 164, # A. D. Boese and N. C. Handy, J. Chem. Phys. 114, 5497 (2001)
'GGA_XC_HCTH_407P' : 93 , # A. D. Boese, A. Chandra, J. M. L. Martin, and D. Marx, J. Chem. Phys. 119, 5965 (2003)
'GGA_XC_HCTH_93' : 161, # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_XC_HCTH_P14' : 95 , # G. Menconi, P. J. Wilson, and D. J. Tozer, J. Chem. Phys. 114, 3958 (2001)
'GGA_XC_HCTH_P76' : 94 , # G. Menconi, P. J. Wilson, and D. J. Tozer, J. Chem. Phys. 114, 3958 (2001)
'GGA_XC_HLE16' : 545, # P. Verma and D. G. Truhlar, J. Phys. Chem. Lett. 8, 380 (2017)
'GGA_XC_KT1' : 167, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_XC_KT2' : 146, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_XC_KT3' : 587, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 121, 5654 (2004)
'GGA_XC_LB07' : 589, # E. Livshits and R. Baer, Phys. Chem. Chem. Phys. 9, 2932 (2007)
'GGA_XC_MOHLYP' : 194, # N. E. Schultz, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. A 109, 11127 (2005)
'GGA_XC_MOHLYP2' : 195, # J. Zheng, Y. Zhao, and D. G. Truhlar, J. Chem. Theory Comput. 5, 808 (2009)
'GGA_XC_MPWLYP1W' : 174, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_NCAP' : 181, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Theory Comput. 15, 303 (2019)
'GGA_XC_OBLYP_D' : 67 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_OPBE_D' : 65 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_OPWLYP_D' : 66 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_PBE1W' : 173, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_PBELYP1W' : 175, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_TH1' : 154, # D. J. Tozer and N. C. Handy, J. Chem. Phys. 108, 2545 (1998)
'GGA_XC_TH2' : 155, # D. J. Tozer and N. C. Handy, J. Phys. Chem. A 102, 3162 (1998)
'GGA_XC_TH3' : 156, # N. C. Handy and D. J. Tozer, Mol. Phys. 94, 707 (1998)
'GGA_XC_TH4' : 157, # N. C. Handy and D. J. Tozer, Mol. Phys. 94, 707 (1998)
'GGA_XC_TH_FC' : 197, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FCFO' : 198, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FCO' : 199, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FL' : 196, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_VV10' : 255, # O. A. Vydrov and T. Van Voorhis, J. Chem. Phys. 133, 244103 (2010)
'GGA_XC_XLYP' : 166, # X. Xu and W. A. Goddard, Proc. Natl. Acad. Sci. U. S. A. 101, 2673 (2004)
#'HYB_GGA_X_LC2GAU' : 710, # J.-W. Song, M. A. Watson, and K. Hirao, J. Chem. Phys. 131, 144108 (2009)
#'HYB_GGA_X_LCGAU' : 708, # J.-W. Song, S. Tokura, T. Sato, M. A. Watson, and K. Hirao, J. Chem. Phys. 127, 154109 (2007)
#'HYB_GGA_X_LCGAU_CORE' : 709, # J.-W. Song, M. A. Watson, A. Nakata, and K. Hirao, J. Chem. Phys. 129, 184113 (2008)
'HYB_GGA_X_N12_SX' : 81 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'HYB_GGA_X_S12H' : 496, # M. Swart, Chem. Phys. Lett. 580, 166 (2013)
'HYB_GGA_X_SOGGA11_X' : 426, # R. Peverati and D. G. Truhlar, J. Chem. Phys. 135, 191102 (2011)
'HYB_GGA_XC_APBE0' : 607, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'HYB_GGA_XC_APF' : 409, # A. Austin, G. A. Petersson, M. J. Frisch, F. J. Dobek, G. Scalmani, and K. Throssell, J. Chem. Theory Comput. 8, 4989 (2012)
'HYB_GGA_XC_B1LYP' : 416, # C. Adamo and V. Barone, Chem. Phys. Lett. 274, 242 (1997)
'HYB_GGA_XC_B1PW91' : 417, # C. Adamo and V. Barone, Chem. Phys. Lett. 274, 242 (1997)
'HYB_GGA_XC_B1WC' : 412, # D. I. Bilc, R. Orlando, R. Shaltaf, G.-M. Rignanese, J. Iniguez, and P. Ghosez, Phys. Rev. B 77, 165107 (2008)
#'HYB_GGA_XC_B2PLYP' : 713, # S. Grimme, J. Chem. Phys. 124, 034108 (2006)
'HYB_GGA_XC_B3LYP' : 402, # P. J. Stephens, F. J. Devlin, C. F. Chabalowski, and M. J. Frisch, J. Phys. Chem. 98, 11623 (1994)
'HYB_GGA_XC_B3LYP5' : 475, # P. J. Stephens, F. J. Devlin, C. F. Chabalowski, and M. J. Frisch, J. Phys. Chem. 98, 11623 (1994)
'HYB_GGA_XC_B3LYP_MCM1' : 461, # M. T. Caldeira and R. Custodio, J. Mol. Model. 25, 62 (2019)
'HYB_GGA_XC_B3LYP_MCM2' : 462, # M. T. Caldeira and R. Custodio, J. Mol. Model. 25, 62 (2019)
'HYB_GGA_XC_B3LYPS' : 459, # M. Reiher, O. Salomon, and B. A. Hess, Theor. Chem. Acc. 107, 48 (2001)
'HYB_GGA_XC_B3P86' : 403, # Defined through Gaussian implementation
#'HYB_GGA_XC_B3P86_NWCHEM' : 315, # Defined through NWChem implementation
'HYB_GGA_XC_B3PW91' : 401, # A. D. Becke, J. Chem. Phys. 98, 5648 (1993)
'HYB_GGA_XC_B5050LYP' : 572, # Y. Shao, M. Head-Gordon, and A. I. Krylov, J. Chem. Phys. 118, 4807 (2003)
'HYB_GGA_XC_B97' : 407, # A. D. Becke, J. Chem. Phys. 107, 8554 (1997)
'HYB_GGA_XC_B97_1' : 408, # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'HYB_GGA_XC_B97_1P' : 266, # A. J. Cohen and N. C. Handy, Chem. Phys. Lett. 316, 160 (2000)
'HYB_GGA_XC_B97_2' : 410, # P. J. Wilson, T. J. Bradley, and D. J. Tozer, J. Chem. Phys. 115, 9233 (2001)
'HYB_GGA_XC_B97_3' : 414, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 123, 121103 (2005)
'HYB_GGA_XC_B97_K' : 413, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'HYB_GGA_XC_BHANDH' : 435, # A. D. Becke, J. Chem. Phys. 98, 1372 (1993)
'HYB_GGA_XC_BHANDHLYP' : 436, # A. D. Becke, J. Chem. Phys. 98, 1372 (1993)
'HYB_GGA_XC_BLYP35' : 499, # M. Renz, K. Theilacker, C. Lambert, and M. Kaupp, J. Am. Chem. Soc. 131, 16292 (2009)
'HYB_GGA_XC_CAM_B3LYP' : 433, # T. Yanai, D. P. Tew, and N. C. Handy, Chem. Phys. Lett. 393, 51 (2004)
'HYB_GGA_XC_CAM_O3LYP' : 395, # M. P. Bircher and U. Rothlisberger, J. Chem. Theory Comput. 14, 3184 (2018)
'HYB_GGA_XC_CAM_PBEH' : 681, # W. Chen, G. Miceli, G.-M. Rignanese, and A. Pasquarello, Phys. Rev. Mater. 2, 073803 (2018)
'HYB_GGA_XC_CAM_QTP_00' : 490, # P. Verma and R. J. Bartlett, J. Chem. Phys. 140, 18A534 (2014)
'HYB_GGA_XC_CAM_QTP_01' : 482, # Y. Jin and R. J. Bartlett, J. Chem. Phys. 145, 034107 (2016)
'HYB_GGA_XC_CAM_QTP_02' : 491, # R. L. A. Haiduke and R. J. Bartlett, J. Chem. Phys. 148, 184106 (2018)
'HYB_GGA_XC_CAMH_B3LYP' : 614, # Y. Shao, Y. Mei, D. Sundholm, and V. R. I. Kaila, J. Chem. Theory Comput. 16, 587 (2020), https://doi.org/10.1021/acs.jctc.9b00823
'HYB_GGA_XC_CAMY_B3LYP' : 470, # M. Seth and T. Ziegler, J. Chem. Theory Comput. 8, 901 (2012)
'HYB_GGA_XC_CAMY_BLYP' : 455, # Y. Akinaga and S. Ten-no, Chem. Phys. Lett. 462, 348 (2008)
'HYB_GGA_XC_CAMY_PBEH' : 682, # W. Chen, G. Miceli, G.-M. Rignanese, and A. Pasquarello, Phys. Rev. Mater. 2, 073803 (2018)
'HYB_GGA_XC_CAP0' : 477, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, Theor. Chem. Acc. 135, 120 (2016)
'HYB_GGA_XC_EDF2' : 476, # C. Y. Lin, M. W. George, and P. M. W. Gill, Aust. J. Chem. 57, 365 (2004)
'HYB_GGA_XC_HAPBE' : 608, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'HYB_GGA_XC_HFLYP' : 314, # C. Lee, W. Yang, and R. G. Parr, Phys. Rev. B 37, 785 (1988)
#'HYB_GGA_XC_HISS' : 717, # T. M. Henderson, A. F. Izmaylov, G. E. Scuseria, and A. Savin, J. Chem. Phys. 127, 221103 (2007)
'HYB_GGA_XC_HJS_B88' : 431, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_B97X' : 432, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_PBE' : 429, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_PBE_SOL' : 430, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HPBEINT' : 472, # E. Fabiano, L. A. Constantin, and F. Della Sala, Int. J. Quantum Chem. 113, 673 (2013)
'HYB_GGA_XC_HSE03' : 427, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'HYB_GGA_XC_HSE06' : 428, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'HYB_GGA_XC_HSE12' : 479, # J. E. Moussa, P. A. Schultz, and J. R. Chelikowsky, J. Chem. Phys. 136, 204117 (2012)
'HYB_GGA_XC_HSE12S' : 480, # J. E. Moussa, P. A. Schultz, and J. R. Chelikowsky, J. Chem. Phys. 136, 204117 (2012)
'HYB_GGA_XC_HSE_SOL' : 481, # L. Schimka, J. Harl, and G. Kresse, J. Chem. Phys. 134, 024116 (2011)
'HYB_GGA_XC_KMLYP' : 485, # J. K. Kang and C. B. Musgrave, J. Chem. Phys. 115, 11040 (2001)
'HYB_GGA_XC_LC_BLYP' : 400, # L. N. Anderson, M. B. Oviedo, and B. M. Wong, J. Chem. Theory Comput. 13, 1656 (2017)
'HYB_GGA_XC_LC_BOP' : 636, # J.-W. Song, T. Hirosawa, T. Tsuneda, and K. Hirao, J. Chem. Phys. 126, 154105 (2007)
'HYB_GGA_XC_LC_PBEOP' : 637, # Y. Tawada, T. Tsuneda, S. Yanagisawa, T. Yanai, and K. Hirao, J. Chem. Phys. 120, 8425 (2004)
'HYB_GGA_XC_LC_QTP' : 492, # R. L. A. Haiduke and R. J. Bartlett, J. Chem. Phys. 148, 184106 (2018)
'HYB_GGA_XC_LC_VV10' : 469, # O. A. Vydrov and T. Van Voorhis, J. Chem. Phys. 133, 244103 (2010)
'HYB_GGA_XC_LC_WPBE' : 478, # O. A. Vydrov and G. E. Scuseria, J. Chem. Phys. 125, 234109 (2006)
'HYB_GGA_XC_LC_WPBE08_WHS' : 488, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBE_WHS' : 486, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBEH_WHS' : 487, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBESOL_WHS' : 489, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LCY_BLYP' : 468, # Y. Akinaga and S. Ten-no, Chem. Phys. Lett. 462, 348 (2008)
'HYB_GGA_XC_LCY_PBE' : 467, # M. Seth and T. Ziegler, J. Chem. Theory Comput. 8, 901 (2012)
'HYB_GGA_XC_LRC_WPBE' : 473, # M. A. Rohrdanz, K. M. Martins, and J. M. Herbert, J. Chem. Phys. 130, 054112 (2009)
'HYB_GGA_XC_LRC_WPBEH' : 465, # M. A. Rohrdanz, K. M. Martins, and J. M. Herbert, J. Chem. Phys. 130, 054112 (2009)
'HYB_GGA_XC_MB3LYP_RC04' : 437, # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 439, 381 (2007)
'HYB_GGA_XC_MPW1K' : 405, # B. J. Lynch, P. L. Fast, M. Harris, and D. G. Truhlar, J. Phys. Chem. A 104, 4811 (2000)
'HYB_GGA_XC_MPW1LYP' : 483, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW1PBE' : 484, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW1PW' : 418, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW3LYP' : 419, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_GGA_XC_MPW3PW' : 415, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPWLYP1M' : 453, # N. E. Schultz, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. A 109, 11127 (2005)
'HYB_GGA_XC_O3LYP' : 404, # A. J. Cohen and N. C. Handy, Mol. Phys. 99, 607 (2001)
'HYB_GGA_XC_PBE0_13' : 456, # P. Cortona, J. Chem. Phys. 136, 086101 (2012)
'HYB_GGA_XC_PBE50' : 290, # Y. A. Bernard, Y. Shao, and A. I. Krylov, J. Chem. Phys. 136, 204103 (2012)
'HYB_GGA_XC_PBE_MOL0' : 273, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBE_MOLB0' : 276, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBE_SOL0' : 274, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBEB0' : 275, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBEH' : 406, # C. Adamo and V. Barone, J. Chem. Phys. 110, 6158 (1999)
'HYB_GGA_XC_QTP17' : 460, # Y. Jin and R. J. Bartlett, J. Chem. Phys. 149, 064111 (2018)
'HYB_GGA_XC_RCAM_B3LYP' : 610, # A. J. Cohen, P. Mori-Sanchez, and W. Yang, J. Chem. Phys. 126, 191109 (2007)
'HYB_GGA_XC_REVB3LYP' : 454, # L. Lu, H. Hu, H. Hou, and B. Wang, Comput. Theor. Chem. 1015, 64 (2013)
'HYB_GGA_XC_SB98_1A' : 420, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_1B' : 421, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_1C' : 422, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2A' : 423, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2B' : 424, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2C' : 425, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
#'HYB_GGA_XC_SRC1_BLYP' : 714, # N. A. Besley, M. J. G. Peach, and D. J. Tozer, Phys. Chem. Chem. Phys. 11, 10350 (2009)
#'HYB_GGA_XC_SRC2_BLYP' : 715, # N. A. Besley, M. J. G. Peach, and D. J. Tozer, Phys. Chem. Chem. Phys. 11, 10350 (2009)
'HYB_GGA_XC_TUNED_CAM_B3LYP' : 434, # K. Okuno, Y. Shigeta, R. Kishi, H. Miyasaka, and M. Nakano, J. Photochem. Photobiol., A 235, 29 (2012)
'HYB_GGA_XC_WB97' : 463, # J.-D. Chai and M. Head-Gordon, J. Chem. Phys. 128, 084106 (2008)
'HYB_GGA_XC_WB97X' : 464, # J.-D. Chai and M. Head-Gordon, J. Chem. Phys. 128, 084106 (2008)
'HYB_GGA_XC_WB97X_D' : 471, # J.-D. Chai and M. Head-Gordon, Phys. Chem. Chem. Phys. 10, 6615 (2008)
'HYB_GGA_XC_WB97X_D3' : 399, # Y.-S. Lin, G.-D. Li, S.-P. Mao, and J.-D. Chai, J. Chem. Theory Comput. 9, 263 (2013)
'HYB_GGA_XC_WB97X_V' : 466, # N. Mardirossian and M. Head-Gordon, Phys. Chem. Chem. Phys. 16, 9904 (2014)
'HYB_GGA_XC_WC04' : 611, # K. W. Wiitala, T. R. Hoye, and C. J. Cramer, J. Chem. Theory Comput. 2, 1085 (2006)
'HYB_GGA_XC_WHPBE0' : 615, # Y. Shao, Y. Mei, D. Sundholm, and V. R. I. Kaila, J. Chem. Theory Comput. 16, 587 (2020), https://doi.org/10.1021/acs.jctc.9b00823
'HYB_GGA_XC_WP04' : 612, # K. W. Wiitala, T. R. Hoye, and C. J. Cramer, J. Chem. Theory Comput. 2, 1085 (2006)
'HYB_GGA_XC_X3LYP' : 411, # X. Xu and W. A. Goddard, Proc. Natl. Acad. Sci. U. S. A. 101, 2673 (2004)
'MGGA_C_B88' : 571, # A. D. Becke, J. Chem. Phys. 88, 1053 (1988)
'MGGA_C_B94' : 397, # A. D. Becke, Int. J. Quantum Chem. 52, 625 (1994)
'MGGA_C_BC95' : 240, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'MGGA_C_CS' : 72 , # R. Colle and O. Salvetti, Theor. Chim. Acta 37, 329 (1975)
'MGGA_C_DLDF' : 37 , # K. Pernal, R. Podeszwa, K. Patkowski, and K. Szalewicz, Phys. Rev. Lett. 103, 263201 (2009)
'MGGA_C_HLTAPW' : 699, # S. Lehtola and M. A. L. Marques, Meta-local density functionals: a new rung on jacob's ladder, (2020), arXiv:2006.16835 [physics.chem-ph]
'MGGA_C_KCIS' : 562, # J. Rey and A. Savin, Int. J. Quantum Chem. 69, 581 (1998)
'MGGA_C_KCISK' : 638, # J. Rey and A. Savin, Int. J. Quantum Chem. 69, 581 (1998)
'MGGA_C_M05' : 237, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Phys. 123, 161103 (2005)
'MGGA_C_M05_2X' : 238, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Theory Comput. 2, 364 (2006)
'MGGA_C_M06' : 235, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'MGGA_C_M06_2X' : 236, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'MGGA_C_M06_HF' : 234, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 110, 13126 (2006)
'MGGA_C_M06_L' : 233, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 125, 194101 (2006)
'MGGA_C_M06_SX' : 311, # Y. Wang, P. Verma, L. Zhang, Y. Li, Z. Liu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 117, 2294 (2020), https://www.pnas.org/content/117/5/2294.full.pdf
'MGGA_C_M08_HX' : 78 , # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'MGGA_C_M08_SO' : 77 , # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'MGGA_C_M11' : 76 , # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 2, 2810 (2011)
'MGGA_C_M11_L' : 75 , # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 3, 117 (2012)
'MGGA_C_MN12_L' : 74 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 13171 (2012)
'MGGA_C_MN12_SX' : 73 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'MGGA_C_MN15' : 269, # H. S. Yu, X. He, S. L. Li, and D. G. Truhlar, Chem. Sci. 7, 5032 (2016)
'MGGA_C_MN15_L' : 261, # H. S. Yu, X. He, and D. G. Truhlar, J. Chem. Theory Comput. 12, 1280 (2016)
'MGGA_C_PKZB' : 239, # J. P. Perdew, S. Kurth, A. Zupan, and P. Blaha, Phys. Rev. Lett. 82, 2544 (1999)
'MGGA_C_R2SCAN' : 498, # J. W. Furness, A. D. Kaplan, J. Ning, J. P. Perdew, and J. Sun, J. Phys. Chem. Lett. 11, 8208 (2020)
'MGGA_C_R2SCANL' : 719, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. B 102, 121109 (2020)
'MGGA_C_REVM06' : 306, # Y. Wang, P. Verma, X. Jin, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 115, 10257 (2018)
'MGGA_C_REVM06_L' : 294, # Y. Wang, X. Jin, H. S. Yu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 114, 8487 (2017)
'MGGA_C_REVM11' : 172, # P. Verma, Y. Wang, S. Ghosh, X. He, and D. G. Truhlar, J. Phys. Chem. A 123, 2966 (2019)
'MGGA_C_REVSCAN' : 582, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_C_REVSCAN_VV10' : 585, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_C_REVTM' : 694, # S. Jana, K. Sharma, and P. Samal, J. Phys. Chem. A 123, 6356 (2019)
'MGGA_C_REVTPSS' : 241, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'MGGA_C_RSCAN' : 494, # A. P. Bartok and J. R. Yates, J. Chem. Phys. 150, 161101 (2019)
'MGGA_C_SCAN' : 267, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'MGGA_C_SCAN_RVV10' : 292, # H. Peng, Z.-H. Yang, J. P. Perdew, and J. Sun, Phys. Rev. X 6, 041005 (2016)
'MGGA_C_SCAN_VV10' : 584, # J. G. Brandenburg, J. E. Bates, J. Sun, and J. P. Perdew, Phys. Rev. B 94, 115144 (2016)
'MGGA_C_SCANL' : 702, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_SCANL_RVV10' : 703, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_SCANL_VV10' : 704, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_TM' : 251, # J. Tao and Y. Mo, Phys. Rev. Lett. 117, 073001 (2016)
'MGGA_C_TPSS' : 231, # J. Tao, J. P. Perdew, V. N. Staroverov, and G. E. Scuseria, Phys. Rev. Lett. 91, 146401 (2003)
'MGGA_C_TPSSLOC' : 247, # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 86, 035130 (2012)
'MGGA_C_VSXC' : 232, # T. V. Voorhis and G. E. Scuseria, J. Chem. Phys. 109, 400 (1998)
'MGGA_K_CSK1' : 629, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK4' : 630, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK_LOC1' : 631, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK_LOC4' : 632, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_GEA2' : 627, # A. S. Kompaneets and E. S. Pavlovskii, Zh. Eksp. Teor. Fiz. 31, 427 (1956), [J. Exp. Theor. Phys. 4, 328 (1957)]
'MGGA_K_GEA4' : 628, # C. H. Hodges, Can. J. Phys. 51, 1428 (1973)
'MGGA_K_L04' : 617, # S. Laricchia, L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 10, 164 (2014)
'MGGA_K_L06' : 618, # S. Laricchia, L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 10, 164 (2014)
'MGGA_K_PC07' : 543, # J. P. Perdew and L. A. Constantin, Phys. Rev. B 75, 155109 (2007)
'MGGA_K_PC07_OPT' : 634, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_K_PGSL025' : 220, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Phys. Chem. Lett. 9, 4385 (2018), pMID: 30019904
'MGGA_K_RDA' : 621, # V. V. Karasiev, R. S. Jones, S. B. Trickey, and F. E. Harris, Phys. Rev. B 80, 245120 (2009)
'MGGA_X_2D_JS17' : 609, # S. Jana and P. Samal, J. Phys. Chem. A 121, 4804 (2017)
'MGGA_X_2D_PRHG07' : 210, # S. Pittalis, E. Rasanen, N. Helbig, and E. K. U. Gross, Phys. Rev. B 76, 235314 (2007)
'MGGA_X_2D_PRHG07_PRP10' : 211, # S. Pittalis, E. Rasanen, N. Helbig, and E. K. U. Gross, Phys. Rev. B 76, 235314 (2007)
'MGGA_X_B00' : 284, # A. D. Becke, J. Chem. Phys. 112, 4020 (2000)
'MGGA_X_BJ06' : 207, # A. D. Becke and E. R. Johnson, J. Chem. Phys. 124, 221101 (2006)
'MGGA_X_BLOC' : 244, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 9, 2256 (2013)
'MGGA_X_BR89' : 206, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_1' : 214, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_EXPLICIT' : 586, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_EXPLICIT_1' : 602, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_EDMGGA' : 686, # J. Tao, J. Chem. Phys. 115, 3519 (2001)
'MGGA_X_GDME_0' : 689, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GDME_KOS' : 690, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GDME_NV' : 687, # J. W. Negele and D. Vautherin, Phys. Rev. C 5, 1472 (1972)
'MGGA_X_GDME_VT' : 691, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GVT4' : 204, # T. V. Voorhis and G. E. Scuseria, J. Chem. Phys. 109, 400 (1998)
'MGGA_X_GX' : 575, # P.-F. Loos, J. Chem. Phys. 146, 114108 (2017)
'MGGA_X_HLTA' : 698, # S. Lehtola and M. A. L. Marques, Meta-local density functionals: a new rung on jacob's ladder, (2020), arXiv:2006.16835 [physics.chem-ph]
'MGGA_X_JK' : 256, # P. Jemmer and P. J. Knowles, Phys. Rev. A 51, 3571 (1995)
'MGGA_X_LTA' : 201, # M. Ernzerhof and G. E. Scuseria, J. Chem. Phys. 111, 911 (1999)
'MGGA_X_M06_L' : 203, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 125, 194101 (2006)
'MGGA_X_M11_L' : 226, # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 3, 117 (2012)
'MGGA_X_MBEEF' : 249, # J. Wellendorff, K. T. Lundgaard, K. W. Jacobsen, and T. Bligaard, J. Chem. Phys. 140, 144107 (2014)
'MGGA_X_MBEEFVDW' : 250, # K. T. Lundgaard, J. Wellendorff, J. Voss, K. W. Jacobsen, and T. Bligaard, Phys. Rev. B 93, 235162 (2016)
'MGGA_X_MBR' : 716, # A. Patra, S. Jana, H. Myneni, and P. Samal, Phys. Chem. Chem. Phys. 21, 19639 (2019)
'MGGA_X_MBRXC_BG' : 696, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 045147 (2019)
'MGGA_X_MBRXH_BG' : 697, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 045147 (2019)
'MGGA_X_MGGAC' : 711, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 155140 (2019)
'MGGA_X_MK00' : 230, # F. R. Manby and P. J. Knowles, J. Chem. Phys. 112, 7002 (2000)
'MGGA_X_MK00B' : 243, # F. R. Manby and P. J. Knowles, J. Chem. Phys. 112, 7002 (2000)
'MGGA_X_MN12_L' : 227, # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 13171 (2012)
'MGGA_X_MN15_L' : 260, # H. S. Yu, X. He, and D. G. Truhlar, J. Chem. Theory Comput. 12, 1280 (2016)
'MGGA_X_MODTPSS' : 245, # J. P. Perdew, A. Ruzsinszky, J. Tao, G. I. Csonka, and G. E. Scuseria, Phys. Rev. A 76, 042506 (2007)
'MGGA_X_MS0' : 221, # J. Sun, B. Xiao, and A. Ruzsinszky, J. Chem. Phys. 137, 051101 (2012)
'MGGA_X_MS1' : 222, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2' : 223, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2_REV' : 228, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2B' : 300, # J. W. Furness and J. Sun, Phys. Rev. B 99, 041119 (2019)
'MGGA_X_MS2BS' : 301, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_MVS' : 257, # J. Sun, J. P. Perdew, and A. Ruzsinszky, Proc. Natl. Acad. Sci. U. S. A. 112, 685 (2015)
'MGGA_X_MVSB' : 302, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_MVSBS' : 303, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_PBE_GX' : 576, # P.-F. Loos, J. Chem. Phys. 146, 114108 (2017)
'MGGA_X_PKZB' : 213, # J. P. Perdew, S. Kurth, A. Zupan, and P. Blaha, Phys. Rev. Lett. 82, 2544 (1999)
'MGGA_X_R2SCAN' : 497, # J. W. Furness, A. D. Kaplan, J. Ning, J. P. Perdew, and J. Sun, J. Phys. Chem. Lett. 11, 8208 (2020)
'MGGA_X_R2SCANL' : 718, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. B 102, 121109 (2020)
'MGGA_X_REGTPSS' : 603, # A. Ruzsinszky, J. Sun, B. Xiao, and G. I. Csonka, J. Chem. Theory Comput. 8, 2078 (2012)
'MGGA_X_REVM06_L' : 293, # Y. Wang, X. Jin, H. S. Yu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 114, 8487 (2017)
'MGGA_X_REVSCAN' : 581, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_X_REVSCANL' : 701, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_X_REVTM' : 693, # S. Jana, K. Sharma, and P. Samal, J. Phys. Chem. A 123, 6356 (2019)
'MGGA_X_REVTPSS' : 212, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'MGGA_X_RLDA' : 688, # X. Campi and A. Bouyssy, Phys. Lett. B 73, 263 (1978)
'MGGA_X_RPP09' : 209, # E. Rasanen, S. Pittalis, and C. R. Proetto, J. Chem. Phys. 132, 044112 (2010)
'MGGA_X_RSCAN' : 493, # A. P. Bartok and J. R. Yates, J. Chem. Phys. 150, 161101 (2019)
'MGGA_X_RTPSS' : 299, # A. J. Garza, A. T. Bell, and M. Head-Gordon, J. Chem. Theory Comput. 14, 3083 (2018)
'MGGA_X_SA_TPSS' : 542, # L. A. Constantin, E. Fabiano, J. M. Pitarke, and F. Della Sala, Phys. Rev. B 93, 115127 (2016)
'MGGA_X_SCAN' : 263, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'MGGA_X_SCANL' : 700, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_X_TASK' : 707, # T. Aschebrock and S. Kummel, Phys. Rev. Res. 1, 033082 (2019)
'MGGA_X_TAU_HCTH' : 205, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'MGGA_X_TB09' : 208, # F. Tran and P. Blaha, Phys. Rev. Lett. 102, 226401 (2009)
'MGGA_X_TH' : 225, # T. Tsuneda and K. Hirao, Phys. Rev. B 62, 15527 (2000)
'MGGA_X_TLDA' : 685, # F. G. Eich and M. Hellgren, J. Chem. Phys. 141, 224107 (2014)
'MGGA_X_TM' : 540, # J. Tao and Y. Mo, Phys. Rev. Lett. 117, 073001 (2016)
'MGGA_X_TPSS' : 202, # J. Tao, J. P. Perdew, V. N. Staroverov, and G. E. Scuseria, Phys. Rev. Lett. 91, 146401 (2003)
'MGGA_X_VT84' : 541, # J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 543, 179 (2012)
'MGGA_XC_B97M_V' : 254, # N. Mardirossian and M. Head-Gordon, J. Chem. Phys. 142, 074111 (2015)
'MGGA_XC_CC06' : 229, # A. C. Cancio and M. Y. Chou, Phys. Rev. B 74, 081202 (2006)
'MGGA_XC_HLE17' : 288, # P. Verma and D. G. Truhlar, J. Phys. Chem. C 121, 7144 (2017)
'MGGA_XC_LP90' : 564, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'MGGA_XC_OTPSS_D' : 64 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'MGGA_XC_TPSSLYP1W' : 242, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'MGGA_XC_ZLP' : 42 , # Q. Zhao, M. Levy, and R. G. Parr, Phys. Rev. A 47, 918 (1993)
'HYB_MGGA_X_BMK' : 279, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'HYB_MGGA_X_DLDF' : 36 , # K. Pernal, R. Podeszwa, K. Patkowski, and K. Szalewicz, Phys. Rev. Lett. 103, 263201 (2009)
'HYB_MGGA_X_JS18' : 705, # S. Jana and P. Samal, Phys. Chem. Chem. Phys. 20, 8999 (2018)
'HYB_MGGA_X_M05' : 438, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Phys. 123, 161103 (2005)
'HYB_MGGA_X_M05_2X' : 439, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Theory Comput. 2, 364 (2006)
'HYB_MGGA_X_M06' : 449, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'HYB_MGGA_X_M06_2X' : 450, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'HYB_MGGA_X_M06_HF' : 444, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 110, 13126 (2006)
'HYB_MGGA_X_M06_SX' : 310, # Y. Wang, P. Verma, L. Zhang, Y. Li, Z. Liu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 117, 2294 (2020), https://www.pnas.org/content/117/5/2294.full.pdf
'HYB_MGGA_X_M08_HX' : 295, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'HYB_MGGA_X_M08_SO' : 296, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'HYB_MGGA_X_M11' : 297, # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 2, 2810 (2011)
'HYB_MGGA_X_MN12_SX' : 248, # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'HYB_MGGA_X_MN15' : 268, # H. S. Yu, X. He, S. L. Li, and D. G. Truhlar, Chem. Sci. 7, 5032 (2016)
'HYB_MGGA_X_MS2H' : 224, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'HYB_MGGA_X_MVSH' : 474, # J. Sun, J. P. Perdew, and A. Ruzsinszky, Proc. Natl. Acad. Sci. U. S. A. 112, 685 (2015)
'HYB_MGGA_X_PJS18' : 706, # B. Patra, S. Jana, and P. Samal, Phys. Chem. Chem. Phys. 20, 8991 (2018)
'HYB_MGGA_X_REVM06' : 305, # Y. Wang, P. Verma, X. Jin, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 115, 10257 (2018)
'HYB_MGGA_X_REVM11' : 304, # P. Verma, Y. Wang, S. Ghosh, X. He, and D. G. Truhlar, J. Phys. Chem. A 123, 2966 (2019)
'HYB_MGGA_X_REVSCAN0' : 583, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'HYB_MGGA_X_SCAN0' : 264, # K. Hui and J.-D. Chai, J. Chem. Phys. 144, 044114 (2016)
'HYB_MGGA_X_TAU_HCTH' : 282, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'HYB_MGGA_XC_B0KCIS' : 563, # J. Toulouse, A. Savin, and C. Adamo, J. Chem. Phys. 117, 10465 (2002)
'HYB_MGGA_XC_B86B95' : 441, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_B88B95' : 440, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_B94_HYB' : 398, # A. D. Becke, Int. J. Quantum Chem. 52, 625 (1994)
'HYB_MGGA_XC_B98' : 598, # A. D. Becke, J. Chem. Phys. 109, 2092 (1998)
'HYB_MGGA_XC_BB1K' : 443, # Y. Zhao, B. J. Lynch, and D. G. Truhlar, J. Phys. Chem. A 108, 2715 (2004)
'HYB_MGGA_XC_EDMGGAH' : 695, # J. Tao, J. Chem. Phys. 116, 2335 (2002)
'HYB_MGGA_XC_MPW1B95' : 445, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_MPW1KCIS' : 566, # Y. Zhao, N. Gonzalez-Garcia, and D. G. Truhlar, J. Phys. Chem. A 109, 2012 (2005)
'HYB_MGGA_XC_MPWB1K' : 446, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_MPWKCIS1K' : 567, # Y. Zhao, N. Gonzalez-Garcia, and D. G. Truhlar, J. Phys. Chem. A 109, 2012 (2005)
'HYB_MGGA_XC_PBE1KCIS' : 568, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 1, 415 (2005)
'HYB_MGGA_XC_PW6B95' : 451, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 109, 5656 (2005)
'HYB_MGGA_XC_PW86B95' : 442, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_PWB6K' : 452, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 109, 5656 (2005)
'HYB_MGGA_XC_REVTPSSH' : 458, # G. I. Csonka, J. P. Perdew, and A. Ruzsinszky, J. Chem. Theory Comput. 6, 3688 (2010)
'HYB_MGGA_XC_TPSS0' : 396, # S. Grimme, J. Phys. Chem. A 109, 3067 (2005)
'HYB_MGGA_XC_TPSS1KCIS' : 569, # Y. Zhao, B. J. Lynch, and D. G. Truhlar, Phys. Chem. Chem. Phys. 7, 43 (2005)
'HYB_MGGA_XC_TPSSH' : 457, # V. N. Staroverov, G. E. Scuseria, J. Tao, and J. P. Perdew, J. Chem. Phys. 119, 12129 (2003)
'HYB_MGGA_XC_WB97M_V' : 531, # N. Mardirossian and M. Head-Gordon, J. Chem. Phys. 144, 214110 (2016)
'HYB_MGGA_XC_X1B95' : 447, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_XB1K' : 448, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
}
#PROBLEMATIC_XC = dict([(XC_CODES[x], x) for x in
# ('GGA_C_SPBE', 'MGGA_X_REVTPSS')])
PROBLEMATIC_XC = {}
def _xc_key_without_underscore(xc_keys):
new_xc = []
for key, xc_id in xc_keys.items():
        for delimiter in ('_XC_', '_X_', '_C_', '_K_'):
            if delimiter in key:
                key0, key1 = key.split(delimiter)
                new_key1 = key1.replace('_', '').replace('-', '')
                if key1 != new_key1:
                    new_xc.append((key0+delimiter+new_key1, xc_id))
                break
return new_xc
XC_CODES.update(_xc_key_without_underscore(XC_CODES))
del _xc_key_without_underscore
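# Illustrative sketch of the helper's effect (not executed here; the key and ID
# are taken from the table above): every key containing extra underscores after
# the family tag gains an underscore-free duplicate, e.g.
#   _xc_key_without_underscore({'GGA_XC_B97_D': 170}) -> [('GGA_XC_B97D', 170)]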
#
# alias
#
XC_CODES.update({
'GGA_C_BCGP' : 'GGA_C_ACGGA',
'LDA' : 1 ,
'SLATER' : 1 ,
'VWN3' : 8,
'VWNRPA' : 8,
'VWN5' : 7,
'B88' : 106,
'PBE0' : 406,
'PBE1PBE' : 406,
'OPTXCORR' : '0.7344536875999693*SLATER - 0.6984752285760186*OPTX,',
'B3LYP' : 'B3LYP5', # VWN5 version
'B3LYP5' : '.2*HF + .08*SLATER + .72*B88, .81*LYP + .19*VWN',
'B3LYPG' : 402, # VWN3, used by Gaussian
'B3P86' : 'B3P865', # VWN5 version
'B3P865' : '.2*HF + .08*SLATER + .72*B88, .81*P86 + .19*VWN',
# FIXME: Check if Gaussian takes a different form for B3P86
#'B3P86G' : 403, # VWN3, used by Gaussian
'B3P86G' : '.2*HF + .08*SLATER + .72*B88, .81*P86 + .19*VWN3',
'B3PW91' : 'B3PW915',
'B3PW915' : '.2*HF + .08*SLATER + .72*B88, .81*PW91 + .19*VWN',
#'B3PW91G' : '.2*HF + .08*SLATER + .72*B88, .81*PW91 + .19*VWN3',
'B3PW91G' : 401,
#'O3LYP5' : '.1161*HF + .9262*SLATER + .8133*OPTXCORR, .81*LYP + .19*VWN5',
#'O3LYPG' : '.1161*HF + .9262*SLATER + .8133*OPTXCORR, .81*LYP + .19*VWN3',
    'O3LYP'             : 404, # in libxc == '.1161*HF + 0.071006917*SLATER + .8133*OPTX, .81*LYP + .19*VWN5', may be erroneous
'MPW3PW' : 'MPW3PW5', # VWN5 version
'MPW3PW5' : '.2*HF + .08*SLATER + .72*MPW91, .81*PW91 + .19*VWN',
'MPW3PWG' : 415, # VWN3, used by Gaussian
'MPW3LYP' : 'MPW3LYP5', # VWN5 version
'MPW3LYP5' : '.218*HF + .073*SLATER + .709*MPW91, .871*LYP + .129*VWN',
'MPW3LYPG' : 419, # VWN3, used by Gaussian
'REVB3LYP' : 'REVB3LYP5', # VWN5 version
'REVB3LYP5' : '.2*HF + .13*SLATER + .67*B88, .84*LYP + .16*VWN',
'REVB3LYPG' : 454, # VWN3, used by Gaussian
'X3LYP' : 'X3LYP5', # VWN5 version
'X3LYP5' : '.218*HF + .073*SLATER + .542385*B88 + .166615*PW91, .871*LYP + .129*VWN',
'X3LYPG' : 411, # VWN3, used by Gaussian
'CAMB3LYP' : 'HYB_GGA_XC_CAM_B3LYP',
'CAMYBLYP' : 'HYB_GGA_XC_CAMY_BLYP',
'CAMYB3LYP' : 'HYB_GGA_XC_CAMY_B3LYP',
'B5050LYP' : '.5*HF + .08*SLATER + .42*B88, .81*LYP + .19*VWN',
'MPW1LYP' : '.25*HF + .75*MPW91, LYP',
'MPW1PBE' : '.25*HF + .75*MPW91, PBE',
'PBE50' : '.5*HF + .5*PBE, PBE',
'REVPBE0' : '.25*HF + .75*PBE_R, PBE',
'B1B95' : 440,
'TPSS0' : '.25*HF + .75*TPSS, TPSS',
}) # noqa: E501
XC_KEYS = set(XC_CODES.keys())
# Some XC functionals have a conventional name, e.g. M06-L means M06-L for the X
# functional plus M06-L for the C functional, and PBE means PBE-X plus PBE-C. If
# such a conventional name were placed in XC_CODES, it could lead to recursive
# references when parsing the xc description. These names (as exceptions to
# XC_CODES) are therefore listed in XC_ALIAS below and are treated as shortcuts
# for compound XC functionals.
XC_ALIAS = {
# Conventional name : name in XC_CODES
'BLYP' : 'B88,LYP',
'BP86' : 'B88,P86',
'PW91' : 'PW91,PW91',
'PBE' : 'PBE,PBE',
'REVPBE' : 'PBE_R,PBE',
'PBESOL' : 'PBE_SOL,PBE_SOL',
'PKZB' : 'PKZB,PKZB',
'TPSS' : 'TPSS,TPSS',
'REVTPSS' : 'REVTPSS,REVTPSS',
'SCAN' : 'SCAN,SCAN',
'RSCAN' : 'RSCAN,RSCAN',
'R2SCAN' : 'R2SCAN,R2SCAN',
'SCANL' : 'SCANL,SCANL',
'R2SCANL' : 'R2SCANL,R2SCANL',
'SOGGA' : 'SOGGA,PBE',
'BLOC' : 'BLOC,TPSSLOC',
'OLYP' : 'OPTX,LYP',
'OPBE' : 'OPTX,PBE',
'RPBE' : 'RPBE,PBE',
'BPBE' : 'B88,PBE',
'MPW91' : 'MPW91,PW91',
'HFLYP' : 'HF,LYP',
'HFPW92' : 'HF,PW_MOD',
'SPW92' : 'SLATER,PW_MOD',
'SVWN' : 'SLATER,VWN',
'MS0' : 'MS0,REGTPSS',
'MS1' : 'MS1,REGTPSS',
'MS2' : 'MS2,REGTPSS',
'MS2H' : 'MS2H,REGTPSS',
'MVS' : 'MVS,REGTPSS',
'MVSH' : 'MVSH,REGTPSS',
'SOGGA11' : 'SOGGA11,SOGGA11',
'SOGGA11_X' : 'SOGGA11_X,SOGGA11_X',
'KT1' : 'KT1,VWN',
'KT2' : 'GGA_XC_KT2',
'KT3' : 'GGA_XC_KT3',
'DLDF' : 'DLDF,DLDF',
'GAM' : 'GAM,GAM',
'M06_L' : 'M06_L,M06_L',
'M06_SX' : 'M06_SX,M06_SX',
'M11_L' : 'M11_L,M11_L',
'MN12_L' : 'MN12_L,MN12_L',
'MN15_L' : 'MN15_L,MN15_L',
'N12' : 'N12,N12',
'N12_SX' : 'N12_SX,N12_SX',
'MN12_SX' : 'MN12_SX,MN12_SX',
'MN15' : 'MN15,MN15',
'MBEEF' : 'MBEEF,PBE_SOL',
'SCAN0' : 'SCAN0,SCAN',
'PBEOP' : 'PBE,OP_PBE',
'BOP' : 'B88,OP_B88',
# new in libxc-4.2.3
'REVSCAN' : 'MGGA_X_REVSCAN,MGGA_C_REVSCAN',
'REVSCAN_VV10' : 'MGGA_X_REVSCAN,MGGA_C_REVSCAN_VV10',
'SCAN_VV10' : 'MGGA_X_SCAN,MGGA_C_SCAN_VV10',
'SCAN_RVV10' : 'MGGA_X_SCAN,MGGA_C_SCAN_RVV10',
'M05' : 'HYB_MGGA_X_M05,MGGA_C_M05',
'M06' : 'HYB_MGGA_X_M06,MGGA_C_M06',
'M05_2X' : 'HYB_MGGA_X_M05_2X,MGGA_C_M05_2X',
'M06_2X' : 'HYB_MGGA_X_M06_2X,MGGA_C_M06_2X',
# extra aliases
'SOGGA11X' : 'SOGGA11_X',
'M06L' : 'M06_L',
'M11L' : 'M11_L',
'MN12L' : 'MN12_L',
'MN15L' : 'MN15_L',
'N12SX' : 'N12_SX',
'MN12SX' : 'MN12_SX',
'M052X' : 'M05_2X',
'M062X' : 'M06_2X',
} # noqa: E122
XC_ALIAS.update([(key.replace('-',''), XC_ALIAS[key])
for key in XC_ALIAS if '-' in key])
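# Illustrative note (aliases quoted verbatim from the table above): a conventional
# name expands to an explicit "X,C" pair before parsing, e.g.
#   'BLYP' -> 'B88,LYP'      'PBE' -> 'PBE,PBE'      'M06_L' -> 'M06_L,M06_L'
# The update above additionally registers a dash-free spelling for any alias key
# that contains '-'.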
VV10_XC = set(('B97M_V', 'WB97M_V', 'WB97X_V', 'VV10', 'LC_VV10',
'REVSCAN_VV10',
'SCAN_VV10', 'SCAN_RVV10', 'SCANL_VV10', 'SCANL_RVV10'))
VV10_XC = VV10_XC.union(set([x.replace('_', '') for x in VV10_XC]))
def xc_reference(xc_code):
    '''Returns the literature references for the individual XC functionals'''
hyb, fn_facs = parse_xc(xc_code)
refs = []
c_refs = (ctypes.c_char_p * 8)()
for xid, fac in fn_facs:
_itrf.LIBXC_xc_reference(xid, c_refs)
for ref in c_refs:
if ref:
refs.append(ref.decode("UTF-8"))
return refs
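# Example usage (illustrative; the exact reference strings depend on the libxc
# version that is linked in):
#   xc_reference('PBE,PBE')
#   # -> ['J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)', ...]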
def xc_type(xc_code):
if xc_code is None:
return None
elif isinstance(xc_code, str):
if is_nlc(xc_code):
return 'NLC'
hyb, fn_facs = parse_xc(xc_code)
else:
fn_facs = [(xc_code, 1)] # mimic fn_facs
if not fn_facs:
return 'HF'
elif all(_itrf.LIBXC_is_lda(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'LDA'
elif any(_itrf.LIBXC_is_meta_gga(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'MGGA'
else:
# any(_itrf.LIBXC_is_gga(ctypes.c_int(xid)) for xid, fac in fn_facs)
# include hybrid_xc
return 'GGA'
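# Example classifications (illustrative, resolved through the libxc flags queried above):
#   xc_type('LDA,VWN')   -> 'LDA'
#   xc_type('PBE,PBE')   -> 'GGA'
#   xc_type('TPSS,TPSS') -> 'MGGA'
#   xc_type('HF')        -> 'HF'    (empty functional list, pure exact exchange)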
def is_lda(xc_code):
return xc_type(xc_code) == 'LDA'
def is_hybrid_xc(xc_code):
if xc_code is None:
return False
elif isinstance(xc_code, str):
if xc_code.isdigit():
return _itrf.LIBXC_is_hybrid(ctypes.c_int(int(xc_code)))
else:
if 'HF' in xc_code:
return True
if hybrid_coeff(xc_code) != 0:
return True
if rsh_coeff(xc_code) != [0, 0, 0]:
return True
return False
elif isinstance(xc_code, int):
return _itrf.LIBXC_is_hybrid(ctypes.c_int(xc_code))
else:
return any((is_hybrid_xc(x) for x in xc_code))
def is_meta_gga(xc_code):
return xc_type(xc_code) == 'MGGA'
def is_gga(xc_code):
return xc_type(xc_code) == 'GGA'
def needs_laplacian(xc_code):
return _itrf.LIBXC_needs_laplacian(xc_code) != 0
def is_nlc(xc_code):
return '__VV10' in xc_code.upper()
def max_deriv_order(xc_code):
hyb, fn_facs = parse_xc(xc_code)
if fn_facs:
return min(_itrf.LIBXC_max_deriv_order(ctypes.c_int(xid)) for xid, fac in fn_facs)
else:
return 3
def test_deriv_order(xc_code, deriv, raise_error=False):
support = deriv <= max_deriv_order(xc_code)
if not support and raise_error:
from pyscf.dft import xcfun
msg = ('libxc library does not support derivative order %d for %s' %
(deriv, xc_code))
try:
if xcfun.test_deriv_order(xc_code, deriv, raise_error=False):
msg += ('''
This functional derivative is supported in the xcfun library.
The following code can be used to change the libxc library to xcfun library:
from pyscf.dft import xcfun
mf._numint.libxc = xcfun
''')
raise NotImplementedError(msg)
except KeyError as e:
sys.stderr.write('\n'+msg+'\n')
sys.stderr.write('%s not found in xcfun library\n\n' % xc_code)
raise e
return support
def hybrid_coeff(xc_code, spin=0):
'''Support recursively defining hybrid functional
'''
hyb, fn_facs = parse_xc(xc_code)
for xid, fac in fn_facs:
hyb[0] += fac * _itrf.LIBXC_hybrid_coeff(ctypes.c_int(xid))
return hyb[0]
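# Example (illustrative; the fractions are reported by libxc itself):
#   hybrid_coeff('PBE0') -> 0.25    (25% exact exchange)
#   hybrid_coeff('PBE')  -> 0.0     (pure GGA, no exact exchange)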
def nlc_coeff(xc_code):
'''Get NLC coefficients
'''
nlc_code = None
if isinstance(xc_code, str) and '__VV10' in xc_code.upper():
xc_code, nlc_code = xc_code.upper().split('__', 1)
hyb, fn_facs = parse_xc(xc_code)
nlc_pars = [0, 0]
nlc_tmp = (ctypes.c_double*2)()
for xid, fac in fn_facs:
_itrf.LIBXC_nlc_coeff(xid, nlc_tmp)
nlc_pars[0] += nlc_tmp[0]
nlc_pars[1] += nlc_tmp[1]
if nlc_pars[0] == 0 and nlc_pars[1] == 0:
if nlc_code is not None:
# Use VV10 NLC parameters by default for the general case
_itrf.LIBXC_nlc_coeff(XC_CODES['GGA_XC_' + nlc_code], nlc_tmp)
nlc_pars[0] += nlc_tmp[0]
nlc_pars[1] += nlc_tmp[1]
else:
raise NotImplementedError(
'%s does not have NLC part. Available functionals are %s' %
                (xc_code, ', '.join(VV10_XC)))  # VV10_XC is a set of names
return nlc_pars
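# Illustrative note: for a VV10-containing functional such as 'wb97m_v' the
# returned pair is [b, C], the two parameters of the VV10 non-local kernel;
# appending '__VV10' to a functional without an NLC part selects the default
# VV10 parameters instead of raising NotImplementedError.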
def rsh_coeff(xc_code):
'''Range-separated parameter and HF exchange components: omega, alpha, beta
Exc_RSH = c_LR * LR_HFX + c_SR * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
= alpha * HFX + beta * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
= alpha * LR_HFX + hyb * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
SR_HFX = < pi | e^{-omega r_{12}}/r_{12} | iq >
LR_HFX = < pi | (1-e^{-omega r_{12}})/r_{12} | iq >
alpha = c_LR
beta = c_SR - c_LR = hyb - alpha
'''
if xc_code is None:
return 0, 0, 0
check_omega = True
if isinstance(xc_code, str) and ',' in xc_code:
# Parse only X part for the RSH coefficients. This is to handle
# exceptions for C functionals such as M11.
xc_code = format_xc_code(xc_code)
xc_code = xc_code.split(',')[0] + ','
if 'SR_HF' in xc_code or 'LR_HF' in xc_code or 'RSH(' in xc_code:
check_omega = False
hyb, fn_facs = parse_xc(xc_code)
hyb, alpha, omega = hyb
beta = hyb - alpha
rsh_pars = [omega, alpha, beta]
rsh_tmp = (ctypes.c_double*3)()
_itrf.LIBXC_rsh_coeff(433, rsh_tmp)
for xid, fac in fn_facs:
_itrf.LIBXC_rsh_coeff(xid, rsh_tmp)
if rsh_pars[0] == 0:
rsh_pars[0] = rsh_tmp[0]
elif check_omega:
# Check functional is actually a CAM functional
if rsh_tmp[0] != 0 and not _itrf.LIBXC_is_cam_rsh(ctypes.c_int(xid)):
raise KeyError('Libxc functional %i employs a range separation '
'kernel that is not supported in PySCF' % xid)
# Check omega
if (rsh_tmp[0] != 0 and rsh_pars[0] != rsh_tmp[0]):
raise ValueError('Different values of omega found for RSH functionals')
rsh_pars[1] += rsh_tmp[1] * fac
rsh_pars[2] += rsh_tmp[2] * fac
return rsh_pars
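# Example (illustrative; numbers quoted from the usual CAM-B3LYP parametrization
# with omega = 0.33, 19% short-range and 65% long-range exact exchange):
#   rsh_coeff('CAM-B3LYP') -> [0.33, 0.65, -0.46]   # [omega, alpha, beta]
# so that alpha + beta = 0.19 recovers the short-range HF fraction.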
def parse_xc_name(xc_name='LDA,VWN'):
'''Convert the XC functional name to libxc library internal ID.
'''
fn_facs = parse_xc(xc_name)[1]
return fn_facs[0][0], fn_facs[1][0]
def parse_xc(description):
r'''Rules to input functional description:
* The given functional description must be a one-line string.
* The functional description is case-insensitive.
* The functional description string has two parts, separated by ",". The
first part describes the exchange functional, the second is the correlation
functional.
      - If "," is not in the string, the entire string is considered as a
        compound XC functional (including both X and C functionals, such as b3lyp).
      - To input only the X functional (without a C functional), leave the second
        part blank. E.g. description='slater,' means a pure LDA functional.
      - To neglect the X functional (just apply the C functional), leave the first
        part blank. E.g. description=',vwn' means a pure VWN functional.
      - If a compound XC functional is specified, no matter whether it is in the
        X part (the string in front of the comma) or the C part (the string behind
        the comma), both the X and C functionals of the compound XC functional
        will be used.
    * The functional names can be placed in arbitrary order. Two names need to
      be separated by the operators "+" or "-". Blank spaces are ignored.
      NOTE the parser only reads the operators "+", "-" and "*"; "/" is not supported.
    * A functional name can have at most one factor. If the factor is not
      given, it is set to 1. A compound functional can be scaled as a unit. For
      example '0.5*b3lyp' is equivalent to
      'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
    * String "HF" stands for exact exchange (HF K matrix). Putting "HF" in the
      correlation functional part is the same as putting "HF" in the exchange
      part.
* String "RSH" means range-separated operator. Its format is
RSH(omega, alpha, beta). Another way to input RSH is to use keywords
SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
alpha" where the number in parenthesis is the value of omega.
    * Be careful with the libxc convention for GGA functionals, in which the LDA
      contribution is already included.
    Args:
        description : str
            A string to describe the linear combination of different XC functionals.
            The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
            If "HF" appears in the string, it stands for the exact exchange.
    Returns:
        (hyb, fn_facs) where
        * hyb = [hyb_coeff, alpha, omega] accumulates the exact-exchange and
          range-separation parameters found in the description (hyb_coeff is the
          short-range HF fraction, alpha the long-range one).
        * fn_facs is a list of (libxc_id, factor) tuples, one entry per individual
          functional component.
    see also libxc_itrf.c
''' # noqa: E501
hyb = [0, 0, 0] # hybrid, alpha, omega (== SR_HF, LR_HF, omega)
if description is None:
return hyb, []
elif isinstance(description, int):
return hyb, [(description, 1.)]
elif not isinstance(description, str): #isinstance(description, (tuple,list)):
return parse_xc('%s,%s' % tuple(description))
def assign_omega(omega, hyb_or_sr, lr=0):
if hyb[2] == omega or omega == 0:
hyb[0] += hyb_or_sr
hyb[1] += lr
elif hyb[2] == 0:
hyb[0] += hyb_or_sr
hyb[1] += lr
hyb[2] = omega
else:
raise ValueError('Different values of omega found for RSH functionals')
fn_facs = []
def parse_token(token, ftype, search_xc_alias=False):
if token:
if token[0] == '-':
sign = -1
token = token[1:]
else:
sign = 1
if '*' in token:
fac, key = token.split('*')
if fac[0].isalpha():
fac, key = key, fac
fac = sign * float(fac)
else:
fac, key = sign, token
if key[:3] == 'RSH':
# RSH(alpha; beta; omega): Range-separated-hybrid functional
# See also utils.format_xc_code
alpha, beta, omega = [float(x) for x in key[4:-1].split(';')]
assign_omega(omega, fac*(alpha+beta), fac*alpha)
elif key == 'HF':
hyb[0] += fac
hyb[1] += fac # also add to LR_HF
elif 'SR_HF' in key:
if '(' in key:
omega = float(key.split('(')[1].split(')')[0])
assign_omega(omega, fac, 0)
                else:  # Assume this omega is the same as the existing omega
hyb[0] += fac
elif 'LR_HF' in key:
if '(' in key:
omega = float(key.split('(')[1].split(')')[0])
assign_omega(omega, 0, fac)
else:
hyb[1] += fac # == alpha
elif key.isdigit():
fn_facs.append((int(key), fac))
else:
if search_xc_alias and key in XC_ALIAS:
x_id = XC_ALIAS[key]
elif key in XC_CODES:
x_id = XC_CODES[key]
else:
possible_xc_for = fpossible_dic[ftype]
possible_xc = XC_KEYS.intersection(possible_xc_for(key))
if possible_xc:
if len(possible_xc) > 1:
sys.stderr.write('Possible xc_code %s matches %s. '
% (list(possible_xc), key))
for x_id in possible_xc: # Prefer X functional
if '_X_' in x_id:
break
else:
x_id = possible_xc.pop()
sys.stderr.write('XC parser takes %s\n' % x_id)
sys.stderr.write('You can add prefix to %s for a '
'specific functional (e.g. X_%s, '
'HYB_MGGA_X_%s)\n'
% (key, key, key))
else:
x_id = possible_xc.pop()
x_id = XC_CODES[x_id]
else:
raise KeyError('Unknown %s functional %s' % (ftype, key))
if isinstance(x_id, str):
hyb1, fn_facs1 = parse_xc(x_id)
# Recursively scale the composed functional, to support e.g. '0.5*b3lyp'
if hyb1[0] != 0 or hyb1[1] != 0:
assign_omega(hyb1[2], hyb1[0]*fac, hyb1[1]*fac)
fn_facs.extend([(xid, c*fac) for xid, c in fn_facs1])
elif x_id is None:
raise NotImplementedError('%s functional %s' % (ftype, key))
else:
fn_facs.append((x_id, fac))
def possible_x_for(key):
return set((key,
'LDA_X_'+key, 'GGA_X_'+key, 'MGGA_X_'+key,
'HYB_GGA_X_'+key, 'HYB_MGGA_X_'+key))
def possible_xc_for(key):
return set((key, 'LDA_XC_'+key, 'GGA_XC_'+key, 'MGGA_XC_'+key,
'HYB_GGA_XC_'+key, 'HYB_MGGA_XC_'+key))
def possible_k_for(key):
return set((key,
'LDA_K_'+key, 'GGA_K_'+key,))
def possible_x_k_for(key):
return possible_x_for(key).union(possible_k_for(key))
def possible_c_for(key):
return set((key,
'LDA_C_'+key, 'GGA_C_'+key, 'MGGA_C_'+key))
fpossible_dic = {'X': possible_x_for,
'C': possible_c_for,
'compound XC': possible_xc_for,
'K': possible_k_for,
'X or K': possible_x_k_for}
description = format_xc_code(description)
if '-' in description: # To handle e.g. M06-L
for key in _NAME_WITH_DASH:
if key in description:
description = description.replace(key, _NAME_WITH_DASH[key])
if ',' in description:
x_code, c_code = description.split(',')
for token in x_code.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'X or K')
for token in c_code.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'C')
else:
for token in description.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'compound XC', search_xc_alias=True)
if hyb[2] == 0: # No omega is assigned. LR_HF is 0 for normal Coulomb operator
hyb[1] = 0
return hyb, remove_dup(fn_facs)
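# Example of the return structure (illustrative; IDs follow the alias table above,
# 106 = B88, and 'VWN' is assumed to resolve to ID 7 as for VWN5):
#   hyb, fn_facs = parse_xc('0.5*HF + 0.5*B88, VWN')
#   # hyb     -> [0.5, 0.0, 0]           (SR_HF fraction, LR_HF fraction, omega)
#   # fn_facs -> [(106, 0.5), (7, 1.0)]  ((libxc_id, factor) per component)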
_NAME_WITH_DASH = {'SR-HF' : 'SR_HF',
'LR-HF' : 'LR_HF',
'OTPSS-D' : 'OTPSS_D',
'B97-1' : 'B97_1',
'B97-2' : 'B97_2',
'B97-3' : 'B97_3',
'B97-K' : 'B97_K',
'B97-D' : 'B97_D',
'HCTH-93' : 'HCTH_93',
'HCTH-120' : 'HCTH_120',
'HCTH-147' : 'HCTH_147',
'HCTH-407' : 'HCTH_407',
'WB97X-D' : 'WB97X_D',
'WB97X-V' : 'WB97X_V',
'WB97M-V' : 'WB97M_V',
'B97M-V' : 'B97M_V',
'M05-2X' : 'M05_2X',
'M06-L' : 'M06_L',
'M06-HF' : 'M06_HF',
'M06-2X' : 'M06_2X',
'M08-HX' : 'M08_HX',
'M08-SO' : 'M08_SO',
'M11-L' : 'M11_L',
'MN12-L' : 'MN12_L',
'MN15-L' : 'MN15_L',
'MN12-SX' : 'MN12_SX',
'N12-SX' : 'N12_SX',
'LRC-WPBE' : 'LRC_WPBE',
'LRC-WPBEH': 'LRC_WPBEH',
'LC-VV10' : 'LC_VV10',
'CAM-B3LYP': 'CAM_B3LYP'}
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, omega=None, verbose=None):
r'''Interface to call libxc library to evaluate XC functional, potential
and functional derivatives.
* The given functional xc_code must be a one-line string.
* The functional xc_code is case-insensitive.
* The functional xc_code string has two parts, separated by ",". The
first part describes the exchange functional, the second part sets the
correlation functional.
      - If "," does not appear in the string, the entire string is treated as the
        name of a compound functional (containing both the exchange and
        the correlation functional) which is declared in the functional
        aliases list. The full list of functional aliases can be obtained by
        calling the function pyscf.dft.xcfun.XC_ALIAS.keys() .
        If the string is not found in the alias list, it is treated as an
        X functional.
      - To input only the X functional (without a C functional), leave the second
        part blank. E.g. description='slater,' means a functional with LDA
        contribution only.
      - To neglect the contribution of the X functional (just apply the C
        functional), leave the first part blank, e.g. description=',vwn' means a
        functional with VWN only.
- If compound XC functional is specified, no matter whether it is in the
X part (the string in front of comma) or the C part (the string behind
comma), both X and C functionals of the compound XC functional will be
used.
* The functional name can be placed in arbitrary order. Two names need to
be separated by operators "+" or "-". Blank spaces are ignored.
NOTE the parser only reads operators "+" "-" "*". / is not supported.
* A functional name can have at most one factor. If the factor is not
given, it is set to 1. Compound functional can be scaled as a unit. For
example '0.5*b3lyp' is equivalent to
'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
* String "HF" stands for exact exchange (HF K matrix). "HF" can be put in
the correlation functional part (after comma). Putting "HF" in the
      correlation part is the same as putting "HF" in the exchange part.
* String "RSH" means range-separated operator. Its format is
RSH(omega, alpha, beta). Another way to input RSH is to use keywords
SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
alpha" where the number in parenthesis is the value of omega.
* Be careful with the libxc convention of GGA functional, in which the LDA
contribution is included.
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
            If "HF" (exact exchange) appears in the string, the HF part will
            be skipped. If an empty string "" is given, the returned exc, vxc, ...
            will be vectors of zeros.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
(v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
see also libxc_itrf.c
''' # noqa: E501
hyb, fn_facs = parse_xc(xc_code)
if omega is not None:
hyb[2] = float(omega)
return _eval_xc(hyb, fn_facs, rho, spin, relativity, deriv, verbose)
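# Minimal usage sketch (illustrative; the density values are made up and any
# LDA-type code works the same way):
#   import numpy
#   rho = numpy.array([0.1, 0.2, 0.3])   # LDA needs only the density itself
#   exc, vxc, fxc, kxc = eval_xc('LDA,VWN', rho, spin=0, deriv=1)
#   # exc.shape == (3,); vxc[0] is vrho with shape (3,); fxc and kxc are None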
def _eval_xc(hyb, fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):
assert(deriv <= 3)
if spin == 0:
nspin = 1
rho_u = rho_d = numpy.asarray(rho, order='C')
else:
nspin = 2
rho_u = numpy.asarray(rho[0], order='C')
rho_d = numpy.asarray(rho[1], order='C')
assert(rho_u.dtype == numpy.double)
assert(rho_d.dtype == numpy.double)
if rho_u.ndim == 1:
rho_u = rho_u.reshape(1,-1)
rho_d = rho_d.reshape(1,-1)
ngrids = rho_u.shape[1]
fn_ids = [x[0] for x in fn_facs]
facs = [x[1] for x in fn_facs]
if hyb[2] != 0:
# Current implementation does not support different omegas for
# different RSH functionals if there are multiple RSHs
omega = [hyb[2]] * len(facs)
else:
omega = [0] * len(facs)
fn_ids_set = set(fn_ids)
if fn_ids_set.intersection(PROBLEMATIC_XC):
problem_xc = [PROBLEMATIC_XC[k]
for k in fn_ids_set.intersection(PROBLEMATIC_XC)]
warnings.warn('Libxc functionals %s may have discrepancy to xcfun '
'library.\n' % problem_xc)
if any([needs_laplacian(fid) for fid in fn_ids]):
raise NotImplementedError('laplacian in meta-GGA method')
n = len(fn_ids)
if (n == 0 or # xc_code = '' or xc_code = 'HF', an empty functional
all((is_lda(x) for x in fn_ids))):
if spin == 0:
nvar = 1
else:
nvar = 2
elif any((is_meta_gga(x) for x in fn_ids)):
if spin == 0:
nvar = 4
else:
nvar = 9
else: # GGA
if spin == 0:
nvar = 2
else:
nvar = 5
outlen = (math.factorial(nvar+deriv) //
(math.factorial(nvar) * math.factorial(deriv)))
outbuf = numpy.zeros((outlen,ngrids))
_itrf.LIBXC_eval_xc(ctypes.c_int(n),
(ctypes.c_int*n)(*fn_ids),
(ctypes.c_double*n)(*facs),
(ctypes.c_double*n)(*omega),
ctypes.c_int(nspin),
ctypes.c_int(deriv), ctypes.c_int(rho_u.shape[1]),
rho_u.ctypes.data_as(ctypes.c_void_p),
rho_d.ctypes.data_as(ctypes.c_void_p),
outbuf.ctypes.data_as(ctypes.c_void_p))
exc = outbuf[0]
vxc = fxc = kxc = None
if nvar == 1: # LDA
if deriv > 0:
vxc = (outbuf[1], None, None, None)
if deriv > 1:
fxc = (outbuf[2],) + (None,)*9
if deriv > 2:
kxc = (outbuf[3], None, None, None)
elif nvar == 2:
if spin == 0: # GGA
if deriv > 0:
vxc = (outbuf[1], outbuf[2], None, None)
if deriv > 1:
fxc = (outbuf[3], outbuf[4], outbuf[5],) + (None,)*7
if deriv > 2:
kxc = outbuf[6:10]
else: # LDA
if deriv > 0:
vxc = (outbuf[1:3].T, None, None, None)
if deriv > 1:
fxc = (outbuf[3:6].T,) + (None,)*9
if deriv > 2:
kxc = (outbuf[6:10].T, None, None, None)
elif nvar == 5: # GGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, None, None)
if deriv > 1:
fxc = (outbuf[6:9].T, outbuf[9:15].T, outbuf[15:21].T) + (None,)*7
if deriv > 2:
kxc = (outbuf[21:25].T, outbuf[25:34].T, outbuf[34:46].T, outbuf[46:56].T)
elif nvar == 4: # MGGA
if deriv > 0:
vxc = outbuf[1:5]
if deriv > 1:
fxc = outbuf[5:15]
if deriv > 2:
kxc = outbuf[15:19]
elif nvar == 9: # MGGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, outbuf[6:8].T, outbuf[8:10].T)
if deriv > 1:
fxc = (outbuf[10:13].T, outbuf[13:19].T, outbuf[19:25].T,
outbuf[25:28].T, outbuf[28:31].T, outbuf[31:35].T,
outbuf[35:39].T, outbuf[39:43].T, outbuf[43:49].T,
outbuf[49:55].T)
return exc, vxc, fxc, kxc
def define_xc_(ni, description, xctype='LDA', hyb=0, rsh=(0,0,0)):
'''Define XC functional. See also :func:`eval_xc` for the rules of input description.
Args:
ni : an instance of :class:`NumInt`
description : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
            If "HF" appears in the string, it stands for the exact exchange.
Kwargs:
xctype : str
'LDA' or 'GGA' or 'MGGA'
hyb : float
hybrid functional coefficient
rsh : a list of three floats
coefficients (omega, alpha, beta) for range-separated hybrid functional.
            omega is the exponent factor in the attenuated Coulomb operator e^{-omega r_{12}}/r_{12};
            alpha is the coefficient for the long-range part, and the hybrid
            coefficient can be obtained by alpha + beta.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
>>> mf = dft.RKS(mol)
>>> define_xc_(mf._numint, '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> define_xc_(mf._numint, 'LDA*.08 + .72*B88 + .2*HF, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> def eval_xc(xc_code, rho, *args, **kwargs):
... exc = 0.01 * rho**2
... vrho = 0.01 * 2 * rho
... vxc = (vrho, None, None, None)
... fxc = None # 2nd order functional derivative
... kxc = None # 3rd order functional derivative
... return exc, vxc, fxc, kxc
>>> define_xc_(mf._numint, eval_xc, xctype='LDA')
>>> mf.kernel()
48.8525211046668
'''
if isinstance(description, str):
ni.eval_xc = lambda xc_code, rho, *args, **kwargs: \
eval_xc(description, rho, *args, **kwargs)
ni.hybrid_coeff = lambda *args, **kwargs: hybrid_coeff(description)
ni.rsh_coeff = lambda *args: rsh_coeff(description)
ni._xc_type = lambda *args: xc_type(description)
elif callable(description):
ni.eval_xc = description
ni.hybrid_coeff = lambda *args, **kwargs: hyb
ni.rsh_coeff = lambda *args, **kwargs: rsh
ni._xc_type = lambda *args: xctype
else:
raise ValueError('Unknown description %s' % description)
return ni
def define_xc(ni, description, xctype='LDA', hyb=0, rsh=(0,0,0)):
return define_xc_(copy.copy(ni), description, xctype, hyb, rsh)
define_xc.__doc__ = define_xc_.__doc__
|
the-stack_0_4819 | """
ID: fufa0001
LANG: PYTHON3
TASK: milk2
"""
fin = open('milk2.in','r')
fout = open('milk2.out','w')
count, *times = fin.readlines()
for i in range(0,int(count)):
times[i]=list(map(int, times[i].split()))
times = sorted(times, key=lambda tup: tup[0])
merged = []
for higher in times:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
if higher[0] <= lower[1]:
upper_bound = max(lower[1], higher[1])
merged[-1] = (lower[0], upper_bound) # replace by merged interval
else:
merged.append(higher)
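# A hedged worked example of the merge above (times are made up, not from the
# real input): sorted intervals [(100, 180), (150, 200), (300, 330)] collapse
# to [(100, 200), (300, 330)], since 150 <= 180 extends the first interval to
# max(180, 200) = 200, while 300 > 200 starts a new one.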
longest_milk = 0
longest_no_milk = 0
for i in range(0,len(merged)):
diff = merged[i][1] - merged[i][0]
if diff > longest_milk:
longest_milk = diff
if i != len(merged) - 1:
diff = merged[i+1][0] - merged[i][1]
if diff > longest_no_milk:
longest_no_milk = diff
fout.write(str(longest_milk) + " " + str(longest_no_milk) + "\n")
fout.close()
|
the-stack_0_4820 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import socket
from os.path import join as pjoin
def get_spec_path(spec, package_name, path_replacements={}, use_bin=False):
"""Extracts the prefix path for the given spack package
path_replacements is a dictionary with string replacements for the path.
"""
if not use_bin:
path = spec[package_name].prefix
else:
path = spec[package_name].prefix.bin
path = os.path.realpath(path)
for key in path_replacements:
path = path.replace(key, path_replacements[key])
return path
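# Hedged usage sketch for get_spec_path (paths and the package name are
# illustrative, not taken from a real spec):
#
#     tpl_root = "/usr/workspace/libs/gcc-8.3.1"
#     conduit_dir = get_spec_path(spec, "conduit", {tpl_root: "${TPL_ROOT}"})
#     # -> something like "${TPL_ROOT}/conduit-0.7.2-abcdef"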
class Axom(CachedCMakePackage, CudaPackage):
"""Axom provides a robust, flexible software infrastructure for the development
of multi-physics applications and computational tools."""
maintainers = ['white238']
homepage = "https://github.com/LLNL/axom"
git = "https://github.com/LLNL/axom.git"
version('main', branch='main', submodules=True)
version('develop', branch='develop', submodules=True)
version('0.5.0', tag='v0.5.0', submodules=True)
version('0.4.0', tag='v0.4.0', submodules=True)
version('0.3.3', tag='v0.3.3', submodules=True)
version('0.3.2', tag='v0.3.2', submodules=True)
version('0.3.1', tag='v0.3.1', submodules=True)
version('0.3.0', tag='v0.3.0', submodules=True)
version('0.2.9', tag='v0.2.9', submodules=True)
root_cmakelists_dir = 'src'
# -----------------------------------------------------------------------
# Variants
# -----------------------------------------------------------------------
variant('shared', default=True,
description='Enable build of shared libraries')
variant('debug', default=False,
description='Build debug instead of optimized version')
variant('examples', default=True, description='Build examples')
variant('tools', default=True, description='Build tools')
variant('cpp14', default=True, description="Build with C++14 support")
variant('fortran', default=True, description="Build with Fortran support")
variant("python", default=False, description="Build python support")
variant("mpi", default=True, description="Build MPI support")
variant('openmp', default=True, description='Turn on OpenMP support.')
variant("mfem", default=False, description="Build with mfem")
variant("hdf5", default=True, description="Build with hdf5")
variant("lua", default=True, description="Build with Lua")
variant("scr", default=False, description="Build with SCR")
variant("umpire", default=True, description="Build with umpire")
variant("raja", default=True, description="Build with raja")
varmsg = "Build development tools (such as Sphinx, Doxygen, etc...)"
variant("devtools", default=False, description=varmsg)
# -----------------------------------------------------------------------
# Dependencies
# -----------------------------------------------------------------------
# Basics
depends_on("[email protected]:", type='build')
depends_on("mpi", when="+mpi")
# Libraries
depends_on("conduit+python", when="+python")
depends_on("conduit~python", when="~python")
depends_on("conduit+hdf5", when="+hdf5")
depends_on("conduit~hdf5", when="~hdf5")
# HDF5 needs to be the same as Conduit's
depends_on("[email protected]:1.8.999~cxx~fortran", when="+hdf5")
depends_on("lua", when="+lua")
depends_on("scr", when="+scr")
depends_on("kvtree@master", when="+scr")
depends_on("dtcmp", when="+scr")
depends_on("raja~openmp", when="+raja~openmp")
depends_on("raja+openmp", when="+raja+openmp")
depends_on("raja+cuda", when="+raja+cuda")
depends_on("umpire~openmp", when="+umpire~openmp")
depends_on("umpire+openmp", when="+umpire+openmp")
depends_on("umpire+cuda", when="+umpire+cuda")
for sm_ in CudaPackage.cuda_arch_values:
depends_on('raja cuda_arch={0}'.format(sm_),
when='+raja cuda_arch={0}'.format(sm_))
depends_on('umpire cuda_arch={0}'.format(sm_),
when='+umpire cuda_arch={0}'.format(sm_))
depends_on("mfem", when="+mfem")
depends_on("mfem~mpi", when="+mfem~mpi")
depends_on("python", when="+python")
# Devtools
depends_on("cppcheck", when="+devtools")
depends_on("doxygen", when="+devtools")
depends_on("graphviz", when="+devtools")
depends_on("python", when="+devtools")
depends_on("py-sphinx", when="+devtools")
depends_on("py-shroud", when="+devtools")
depends_on("[email protected]", when="+devtools", type='build')
# Conduit's cmake config files moved and < 0.4.0 can't find it
conflicts("^[email protected]:", when="@:0.4.0")
# Sidre requires conduit_blueprint_mpi.hpp
conflicts("^conduit@:0.6.0", when="@0.5.0:")
def flag_handler(self, name, flags):
if self.spec.satisfies('%cce') and name == 'fflags':
flags.append('-ef')
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
return (None, None, None) # handled in the cmake cache
return (flags, None, None)
def _get_sys_type(self, spec):
sys_type = spec.architecture
# if on llnl systems, we can use the SYS_TYPE
if "SYS_TYPE" in env:
sys_type = env["SYS_TYPE"]
return sys_type
@property
def cache_name(self):
hostname = socket.gethostname()
if "SYS_TYPE" in env:
# Are we on a LLNL system then strip node number
hostname = hostname.rstrip('1234567890')
return "{0}-{1}-{2}@{3}.cmake".format(
hostname,
self._get_sys_type(self.spec),
self.spec.compiler.name,
self.spec.compiler.version
)
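    # Hedged example of the name produced above (host, SYS_TYPE and compiler
    # are illustrative): on a node "quartz5" with SYS_TYPE "toss_3_x86_64_ib"
    # and gcc 8.3.1, the trailing digits are stripped and the cache file is
    # "quartz-toss_3_x86_64_ib-gcc@8.3.1.cmake".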
def initconfig_compiler_entries(self):
spec = self.spec
entries = super(Axom, self).initconfig_compiler_entries()
if "+fortran" in spec or self.compiler.fc is not None:
entries.append(cmake_cache_option("ENABLE_FORTRAN", True))
else:
entries.append(cmake_cache_option("ENABLE_FORTRAN", False))
if ((self.compiler.fc is not None)
and ("gfortran" in self.compiler.fc)
and ("clang" in self.compiler.cxx)):
libdir = pjoin(os.path.dirname(
os.path.dirname(self.compiler.cxx)), "lib")
flags = ""
for _libpath in [libdir, libdir + "64"]:
if os.path.exists(_libpath):
flags += " -Wl,-rpath,{0}".format(_libpath)
description = ("Adds a missing libstdc++ rpath")
if flags:
entries.append(cmake_cache_string("BLT_EXE_LINKER_FLAGS", flags,
description))
if "+cpp14" in spec:
entries.append(cmake_cache_string("BLT_CXX_STD", "c++14", ""))
return entries
def initconfig_hardware_entries(self):
spec = self.spec
entries = super(Axom, self).initconfig_hardware_entries()
if "+cuda" in spec:
entries.append(cmake_cache_option("ENABLE_CUDA", True))
entries.append(cmake_cache_option("CUDA_SEPARABLE_COMPILATION",
True))
entries.append(
cmake_cache_option("AXOM_ENABLE_ANNOTATIONS", True))
# CUDA_FLAGS
cudaflags = "-restrict --expt-extended-lambda "
if not spec.satisfies('cuda_arch=none'):
cuda_arch = spec.variants['cuda_arch'].value[0]
entries.append(cmake_cache_string(
"CMAKE_CUDA_ARCHITECTURES",
cuda_arch))
cudaflags += '-arch sm_${CMAKE_CUDA_ARCHITECTURES} '
else:
entries.append(
"# cuda_arch could not be determined\n\n")
if "+cpp14" in spec:
cudaflags += " -std=c++14"
else:
cudaflags += " -std=c++11"
entries.append(
cmake_cache_string("CMAKE_CUDA_FLAGS", cudaflags))
entries.append(
"# nvcc does not like gtest's 'pthreads' flag\n")
entries.append(
cmake_cache_option("gtest_disable_pthreads", True))
entries.append("#------------------{0}".format("-" * 30))
entries.append("# Hardware Specifics")
entries.append("#------------------{0}\n".format("-" * 30))
# OpenMP
entries.append(cmake_cache_option("ENABLE_OPENMP",
spec.satisfies('+openmp')))
# Enable death tests
entries.append(cmake_cache_option(
"ENABLE_GTEST_DEATH_TESTS",
not spec.satisfies('+cuda target=ppc64le:')
))
if (self.compiler.fc is not None) and ("xlf" in self.compiler.fc):
# Grab lib directory for the current fortran compiler
libdir = pjoin(os.path.dirname(
os.path.dirname(self.compiler.fc)),
"lib")
description = ("Adds a missing rpath for libraries "
"associated with the fortran compiler")
linker_flags = "${BLT_EXE_LINKER_FLAGS} -Wl,-rpath," + libdir
entries.append(cmake_cache_string("BLT_EXE_LINKER_FLAGS",
linker_flags, description))
if "+shared" in spec:
linker_flags = "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-rpath," \
+ libdir
entries.append(cmake_cache_string(
"CMAKE_SHARED_LINKER_FLAGS",
linker_flags, description))
description = ("Converts C-style comments to Fortran style "
"in preprocessed files")
entries.append(cmake_cache_string(
"BLT_FORTRAN_FLAGS",
"-WF,-C! -qxlf2003=polymorphic",
description))
if spec.satisfies('target=ppc64le:'):
# Fix for working around CMake adding implicit link directories
# returned by the BlueOS compilers to link executables with
# non-system default stdlib
_gcc_prefix = "/usr/tce/packages/gcc/gcc-4.9.3/lib64"
if os.path.exists(_gcc_prefix):
_gcc_prefix2 = pjoin(
_gcc_prefix,
"gcc/powerpc64le-unknown-linux-gnu/4.9.3")
_link_dirs = "{0};{1}".format(_gcc_prefix, _gcc_prefix2)
entries.append(cmake_cache_string(
"BLT_CMAKE_IMPLICIT_LINK_DIRECTORIES_EXCLUDE",
_link_dirs))
return entries
def initconfig_mpi_entries(self):
spec = self.spec
entries = super(Axom, self).initconfig_mpi_entries()
if "+mpi" in spec:
entries.append(cmake_cache_option("ENABLE_MPI", True))
if spec['mpi'].name == 'spectrum-mpi':
entries.append(cmake_cache_string("BLT_MPI_COMMAND_APPEND",
"mpibind"))
else:
entries.append(cmake_cache_option("ENABLE_MPI", False))
return entries
def initconfig_package_entries(self):
spec = self.spec
entries = []
# TPL locations
entries.append("#------------------{0}".format("-" * 60))
entries.append("# TPLs")
entries.append("#------------------{0}\n".format("-" * 60))
# Try to find the common prefix of the TPL directory, including the
# compiler. If found, we will use this in the TPL paths
compiler_str = str(spec.compiler).replace('@', '-')
prefix_paths = prefix.split(compiler_str)
path_replacements = {}
if len(prefix_paths) == 2:
tpl_root = os.path.realpath(pjoin(prefix_paths[0], compiler_str))
path_replacements[tpl_root] = "${TPL_ROOT}"
entries.append("# Root directory for generated TPLs\n")
entries.append(cmake_cache_path("TPL_ROOT", tpl_root))
conduit_dir = get_spec_path(spec, "conduit", path_replacements)
entries.append(cmake_cache_path("CONDUIT_DIR", conduit_dir))
# optional tpls
for dep in ('mfem', 'hdf5', 'lua', 'raja', 'umpire'):
if '+%s' % dep in spec:
dep_dir = get_spec_path(spec, dep, path_replacements)
entries.append(cmake_cache_path('%s_DIR' % dep.upper(),
dep_dir))
else:
entries.append('# %s not built\n' % dep.upper())
if '+scr' in spec:
dep_dir = get_spec_path(spec, 'scr', path_replacements)
entries.append(cmake_cache_path('SCR_DIR', dep_dir))
# scr's dependencies
for dep in ('kvtree', 'dtcmp'):
if spec.satisfies('^{0}'.format(dep)):
dep_dir = get_spec_path(spec, dep, path_replacements)
entries.append(cmake_cache_path('%s_DIR' % dep.upper(), dep_dir))
else:
entries.append('# scr not built\n')
##################################
# Devtools
##################################
entries.append("#------------------{0}".format("-" * 60))
entries.append("# Devtools")
entries.append("#------------------{0}\n".format("-" * 60))
# Add common prefix to path replacement list
if "+devtools" in spec:
# Grab common devtools root and strip the trailing slash
path1 = os.path.realpath(spec["cppcheck"].prefix)
path2 = os.path.realpath(spec["doxygen"].prefix)
devtools_root = os.path.commonprefix([path1, path2])[:-1]
path_replacements[devtools_root] = "${DEVTOOLS_ROOT}"
entries.append(
"# Root directory for generated developer tools\n")
entries.append(cmake_cache_path("DEVTOOLS_ROOT", devtools_root))
# Only turn on clangformat support if devtools is on
clang_fmt_path = spec['llvm'].prefix.bin.join('clang-format')
entries.append(cmake_cache_path(
"CLANGFORMAT_EXECUTABLE", clang_fmt_path))
else:
entries.append("# ClangFormat disabled due to disabled devtools\n")
entries.append(cmake_cache_option("ENABLE_CLANGFORMAT", False))
if spec.satisfies('^python') or "+devtools" in spec:
python_path = os.path.realpath(spec['python'].command.path)
for key in path_replacements:
python_path = python_path.replace(key, path_replacements[key])
entries.append(cmake_cache_path("PYTHON_EXECUTABLE", python_path))
enable_docs = spec.satisfies('^doxygen') or spec.satisfies('^py-sphinx')
entries.append(cmake_cache_option("ENABLE_DOCS", enable_docs))
if spec.satisfies('^py-sphinx'):
python_bin_dir = get_spec_path(spec, "python",
path_replacements,
use_bin=True)
entries.append(cmake_cache_path("SPHINX_EXECUTABLE",
pjoin(python_bin_dir,
"sphinx-build")))
if spec.satisfies('^py-shroud'):
shroud_bin_dir = get_spec_path(spec, "py-shroud",
path_replacements, use_bin=True)
entries.append(cmake_cache_path("SHROUD_EXECUTABLE",
pjoin(shroud_bin_dir, "shroud")))
for dep in ('cppcheck', 'doxygen'):
if spec.satisfies('^%s' % dep):
dep_bin_dir = get_spec_path(spec, dep, path_replacements,
use_bin=True)
entries.append(cmake_cache_path('%s_EXECUTABLE' % dep.upper(),
pjoin(dep_bin_dir, dep)))
return entries
def cmake_args(self):
options = []
if self.run_tests is False:
options.append('-DENABLE_TESTS=OFF')
else:
options.append('-DENABLE_TESTS=ON')
options.append(self.define_from_variant(
'BUILD_SHARED_LIBS', 'shared'))
options.append(self.define_from_variant(
'AXOM_ENABLE_EXAMPLES', 'examples'))
options.append(self.define_from_variant(
'AXOM_ENABLE_TOOLS', 'tools'))
return options
def patch(self):
if self.spec.satisfies('%cce'):
filter_file('PROPERTIES LINKER_LANGUAGE CXX',
'PROPERTIES LINKER_LANGUAGE CXX \n LINK_FLAGS "-fopenmp"',
'src/axom/quest/examples/CMakeLists.txt')
|
the-stack_0_4821 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
from uuid import uuid4
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from .._models import KeyVaultRoleAssignment, KeyVaultRoleDefinition
from .._internal import AsyncKeyVaultClientBase
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports
from typing import Any, Optional, Union
from uuid import UUID
from azure.core.async_paging import AsyncItemPaged
from .._enums import KeyVaultRoleScope
class KeyVaultAccessControlClient(AsyncKeyVaultClientBase):
"""Manages role-based access to Azure Key Vault.
:param str vault_url: URL of the vault the client will manage. This is also called the vault's "DNS Name".
:param credential: an object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity`
"""
# pylint:disable=protected-access
@distributed_trace_async
async def create_role_assignment(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_definition_id: str, principal_id: str, **kwargs: "Any"
) -> KeyVaultRoleAssignment:
"""Create a role assignment.
:param role_scope: scope the role assignment will apply over. :class:`KeyVaultRoleScope` defines common broad
scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:param str role_definition_id: ID of the role's definition
:param str principal_id: Azure Active Directory object ID of the principal which will be assigned the role. The
principal can be a user, service principal, or security group.
:keyword role_assignment_name: a name for the role assignment. Must be a UUID.
:paramtype role_assignment_name: str or uuid.UUID
:rtype: ~azure.keyvault.administration.KeyVaultRoleAssignment
"""
role_assignment_name = kwargs.pop("role_assignment_name", None) or uuid4()
create_parameters = self._client.role_assignments.models.RoleAssignmentCreateParameters(
properties=self._client.role_assignments.models.RoleAssignmentProperties(
principal_id=principal_id, role_definition_id=str(role_definition_id)
)
)
assignment = await self._client.role_assignments.create(
vault_base_url=self._vault_url,
scope=role_scope,
role_assignment_name=str(role_assignment_name),
parameters=create_parameters,
**kwargs
)
return KeyVaultRoleAssignment._from_generated(assignment)
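    # Hedged usage sketch (endpoint, credential and IDs are placeholders, not
    # real values):
    #
    #     client = KeyVaultAccessControlClient("https://my-hsm.managedhsm.azure.net/", credential)
    #     assignment = await client.create_role_assignment(
    #         KeyVaultRoleScope.GLOBAL, "<role-definition-id>", "<aad-object-id>"
    #     )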
@distributed_trace_async
async def delete_role_assignment(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_assignment_name: "Union[str, UUID]", **kwargs: "Any"
) -> KeyVaultRoleAssignment:
"""Delete a role assignment.
:param role_scope: the assignment's scope, for example "/", "/keys", or "/keys/<specific key identifier>".
:class:`KeyVaultRoleScope` defines common broad scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:param role_assignment_name: the assignment's name.
:type role_assignment_name: str or uuid.UUID
:returns: the deleted assignment
:rtype: ~azure.keyvault.administration.KeyVaultRoleAssignment
"""
assignment = await self._client.role_assignments.delete(
vault_base_url=self._vault_url, scope=role_scope, role_assignment_name=str(role_assignment_name), **kwargs
)
return KeyVaultRoleAssignment._from_generated(assignment)
@distributed_trace_async
async def get_role_assignment(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_assignment_name: "Union[str, UUID]", **kwargs: "Any"
) -> KeyVaultRoleAssignment:
"""Get a role assignment.
:param role_scope: the assignment's scope, for example "/", "/keys", or "/keys/<specific key identifier>".
:class:`KeyVaultRoleScope` defines common broad scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:param role_assignment_name: the assignment's name.
:type role_assignment_name: str or uuid.UUID
:rtype: ~azure.keyvault.administration.KeyVaultRoleAssignment
"""
assignment = await self._client.role_assignments.get(
vault_base_url=self._vault_url, scope=role_scope, role_assignment_name=str(role_assignment_name), **kwargs
)
return KeyVaultRoleAssignment._from_generated(assignment)
@distributed_trace
def list_role_assignments(
self, role_scope: "Union[str, KeyVaultRoleScope]", **kwargs: "Any"
) -> "AsyncItemPaged[KeyVaultRoleAssignment]":
"""List all role assignments for a scope.
:param role_scope: scope of the role assignments. :class:`KeyVaultRoleScope` defines common broad
scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.administration.KeyVaultRoleAssignment]
"""
return self._client.role_assignments.list_for_scope(
self._vault_url,
role_scope,
cls=lambda result: [KeyVaultRoleAssignment._from_generated(a) for a in result],
**kwargs
)
@distributed_trace_async
async def set_role_definition(
self,
role_scope: "Union[str, KeyVaultRoleScope]",
role_definition_name: "Optional[Union[str, UUID]]" = None,
**kwargs: "Any"
) -> "KeyVaultRoleDefinition":
"""Creates or updates a custom role definition.
:param role_scope: scope of the role definition. :class:`KeyVaultRoleScope` defines common broad scopes.
Specify a narrower scope as a string. Managed HSM only supports '/', or KeyVaultRoleScope.GLOBAL.
:type role_scope: str or KeyVaultRoleScope
:param role_definition_name: the unique role definition name. Unless a UUID is provided, a new role definition
will be created with a generated unique name. Providing the unique name of an existing role definition will
update that role definition.
:type role_definition_name: str or uuid.UUID
:keyword str role_name: the role's display name. If unspecified when creating or updating a role definition, the
role name will be set to an empty string.
:keyword str description: a description of the role definition. If unspecified when creating or updating a role
definition, the description will be set to an empty string.
:keyword permissions: the role definition's permissions. If unspecified when creating or updating a role
definition, the role definition will have no action permissions.
:paramtype permissions: Iterable[KeyVaultPermission]
:keyword assignable_scopes: the scopes for which the role definition can be assigned.
:paramtype assignable_scopes: Iterable[str] or Iterable[KeyVaultRoleScope]
:returns: The created or updated role definition
:rtype: ~azure.keyvault.administration.KeyVaultRoleDefinition
"""
permissions = [
self._client.role_definitions.models.Permission(
actions=p.actions,
not_actions=p.not_actions,
data_actions=p.data_actions,
not_data_actions=p.not_data_actions,
)
for p in kwargs.pop("permissions", None) or []
]
properties = self._client.role_definitions.models.RoleDefinitionProperties(
role_name=kwargs.pop("role_name", None),
description=kwargs.pop("description", None),
permissions=permissions,
assignable_scopes=kwargs.pop("assignable_scopes", None),
)
parameters = self._client.role_definitions.models.RoleDefinitionCreateParameters(properties=properties)
definition = await self._client.role_definitions.create_or_update(
vault_base_url=self._vault_url,
scope=role_scope,
role_definition_name=str(role_definition_name or uuid4()),
parameters=parameters,
**kwargs
)
return KeyVaultRoleDefinition._from_generated(definition)
@distributed_trace_async
async def get_role_definition(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_definition_name: "Union[str, UUID]", **kwargs: "Any"
) -> "KeyVaultRoleDefinition":
"""Get the specified role definition.
:param role_scope: scope of the role definition. :class:`KeyVaultRoleScope` defines common broad scopes.
Specify a narrower scope as a string. Managed HSM only supports '/', or KeyVaultRoleScope.GLOBAL.
:type role_scope: str or KeyVaultRoleScope
:param role_definition_name: the role definition's name.
:type role_definition_name: str or uuid.UUID
:rtype: ~azure.keyvault.administration.KeyVaultRoleDefinition
"""
definition = await self._client.role_definitions.get(
vault_base_url=self._vault_url, scope=role_scope, role_definition_name=str(role_definition_name), **kwargs
)
return KeyVaultRoleDefinition._from_generated(definition)
@distributed_trace_async
async def delete_role_definition(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_definition_name: "Union[str, UUID]", **kwargs: "Any"
) -> "KeyVaultRoleDefinition":
"""Deletes a custom role definition.
:param role_scope: scope of the role definition. :class:`KeyVaultRoleScope` defines common broad scopes.
Specify a narrower scope as a string. Managed HSM only supports '/', or KeyVaultRoleScope.GLOBAL.
:type role_scope: str or KeyVaultRoleScope
:param role_definition_name: the role definition's name.
:type role_definition_name: str or uuid.UUID
:returns: the deleted role definition
:rtype: ~azure.keyvault.administration.KeyVaultRoleDefinition
"""
definition = await self._client.role_definitions.delete(
vault_base_url=self._vault_url, scope=role_scope, role_definition_name=str(role_definition_name), **kwargs
)
return KeyVaultRoleDefinition._from_generated(definition)
@distributed_trace
def list_role_definitions(
self, role_scope: "Union[str, KeyVaultRoleScope]", **kwargs: "Any"
) -> "AsyncItemPaged[KeyVaultRoleDefinition]":
"""List all role definitions applicable at and above a scope.
:param role_scope: scope of the role definitions. :class:`KeyVaultRoleScope` defines common broad
scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.administration.KeyVaultRoleDefinition]
"""
return self._client.role_definitions.list(
self._vault_url,
role_scope,
cls=lambda result: [KeyVaultRoleDefinition._from_generated(d) for d in result],
**kwargs
)
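    # Hedged usage sketch for the async pager returned above (scope and the
    # printed attribute are illustrative):
    #
    #     async for definition in client.list_role_definitions(KeyVaultRoleScope.GLOBAL):
    #         print(definition.role_name)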
|
the-stack_0_4824 | """The IPython kernel implementation"""
import asyncio
from contextlib import contextmanager
from functools import partial
import getpass
import signal
import sys
from IPython.core import release
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from tornado import gen
from traitlets import Instance, Type, Any, List, Bool
from .comm import CommManager
from .kernelbase import Kernel as KernelBase
from .zmqshell import ZMQInteractiveShell
try:
from IPython.core.interactiveshell import _asyncio_runner
except ImportError:
_asyncio_runner = None
try:
from IPython.core.completer import rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter
_use_experimental_60_completion = True
except ImportError:
_use_experimental_60_completion = False
_EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental'
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
shell_class = Type(ZMQInteractiveShell)
use_experimental_completions = Bool(True,
help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
).tag(config=True)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.comm_manager = CommManager(parent=self, kernel=self)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
help_links = List([
{
'text': "Python Reference",
'url': "https://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython Reference",
'url': "https://ipython.org/documentation.html",
},
{
'text': "NumPy Reference",
'url': "https://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy Reference",
'url': "https://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib Reference",
'url': "https://matplotlib.org/contents.html",
},
{
'text': "SymPy Reference",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas Reference",
'url': "https://pandas.pydata.org/pandas-docs/stable/",
},
]).tag(config=True)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def init_metadata(self, parent):
"""Initialize metadata.
Run at the beginning of each execution request.
"""
md = super(IPythonKernel, self).init_metadata(parent)
# FIXME: remove deprecated ipyparallel-specific code
# This is required for ipyparallel < 5.0
md.update({
'dependencies_met' : True,
'engine' : self.ident,
})
return md
def finish_metadata(self, parent, metadata, reply_content):
"""Finish populating metadata.
Run after completing an execution request.
"""
# FIXME: remove deprecated ipyparallel-specific code
# This is required by ipyparallel < 5.0
metadata['status'] = reply_content['status']
if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency':
metadata['dependencies_met'] = False
return metadata
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
@contextmanager
def _cancel_on_sigint(self, future):
"""ContextManager for capturing SIGINT and cancelling a future
SIGINT raises in the event loop when running async code,
but we want it to halt a coroutine.
Ideally, it would raise KeyboardInterrupt,
but this turns it into a CancelledError.
At least it gets a decent traceback to the user.
"""
sigint_future = asyncio.Future()
# whichever future finishes first,
# cancel the other one
def cancel_unless_done(f, _ignored):
if f.cancelled() or f.done():
return
f.cancel()
# when sigint finishes,
# abort the coroutine with CancelledError
sigint_future.add_done_callback(
partial(cancel_unless_done, future)
)
# when the main future finishes,
# stop watching for SIGINT events
future.add_done_callback(
partial(cancel_unless_done, sigint_future)
)
def handle_sigint(*args):
def set_sigint_result():
if sigint_future.cancelled() or sigint_future.done():
return
sigint_future.set_result(1)
# use add_callback for thread safety
self.io_loop.add_callback(set_sigint_result)
# set the custom sigint hander during this context
save_sigint = signal.signal(signal.SIGINT, handle_sigint)
try:
yield
finally:
# restore the previous sigint handler
signal.signal(signal.SIGINT, save_sigint)
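    # Hedged sketch of the pattern this context manager supports (the coroutine
    # name is illustrative); the same shape appears in do_execute below:
    #
    #     fut = asyncio.ensure_future(some_coroutine())
    #     with self._cancel_on_sigint(fut):
    #         result = yield fut  # Ctrl-C now cancels fut instead of killing the loop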
@gen.coroutine
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
if hasattr(shell, 'run_cell_async') and hasattr(shell, 'should_run_async'):
run_cell = shell.run_cell_async
should_run_async = shell.should_run_async
else:
should_run_async = lambda cell: False
# older IPython,
# use blocking run_cell and wrap it in coroutine
@gen.coroutine
def run_cell(*args, **kwargs):
return shell.run_cell(*args, **kwargs)
try:
# default case: runner is asyncio and asyncio is already running
# TODO: this should check every case for "are we inside the runner",
# not just asyncio
if (
_asyncio_runner
and should_run_async(code)
and shell.loop_runner is _asyncio_runner
and asyncio.get_event_loop().is_running()
):
coro = run_cell(code, store_history=store_history, silent=silent)
coro_future = asyncio.ensure_future(coro)
with self._cancel_on_sigint(coro_future):
res = yield coro_future
else:
# runner isn't already running,
# make synchronous call,
# letting shell dispatch to loop runners
res = shell.run_cell(code, store_history=store_history, silent=silent)
finally:
self._restore_input()
if res.error_before_exec is not None:
err = res.error_before_exec
else:
err = res.error_in_exec
if res.success:
reply_content[u'status'] = u'ok'
else:
reply_content[u'status'] = u'error'
reply_content.update({
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(err).__name__),
u'evalue': safe_unicode(err),
})
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
method='execute')
reply_content['engine_info'] = e_info
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and always clear the payload system.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
if _use_experimental_60_completion and self.use_experimental_completions:
return self._experimental_do_complete(code, cursor_pos)
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def _experimental_do_complete(self, code, cursor_pos):
"""
Experimental completions from IPython, using Jedi.
"""
if cursor_pos is None:
cursor_pos = len(code)
with _provisionalcompleter():
raw_completions = self.shell.Completer.completions(code, cursor_pos)
completions = list(_rectify_completions(code, raw_completions))
comps = []
for comp in completions:
comps.append(dict(
start=comp.start,
end=comp.end,
text=comp.text,
type=comp.type,
))
if completions:
s = completions[0].start
e = completions[0].end
matches = [c.text for c in completions]
else:
s = cursor_pos
e = cursor_pos
matches = []
return {'matches': matches,
'cursor_end': e,
'cursor_start': s,
'metadata': {_EXPERIMENTAL_KEY_NAME: comps},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
reply_content = {'status' : 'ok'}
reply_content['data'] = {}
reply_content['metadata'] = {}
try:
reply_content['data'].update(
self.shell.object_inspect_mime(
name,
detail_level=detail_level
)
)
if not self.shell.enable_html_pager:
reply_content['data'].pop('text/html')
reply_content['found'] = True
except KeyError:
reply_content['found'] = False
return reply_content
def do_history(self, hist_access_type, output, raw, session=0, start=0,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {
'status': 'ok',
'history' : list(hist),
}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_splitter.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
from .serialize import serialize_object, unpack_apply_message
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except BaseException as e:
# invoke IPython traceback formatting
shell.showtraceback()
reply_content = {
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(e).__name__),
u'evalue': safe_unicode(e),
}
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
reply_content['status'] = 'error'
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
|
the-stack_0_4827 | """
Advent of Code 2020
Day 16
"""
def get_data(fname: str) -> tuple:
"""
Read the data file.
"""
with open(fname) as f:
texts = f.read().split('\n\n')
# Get the fields and all their valid values. Not space efficient,
# but there aren't that many of them.
fields = {}
for field in texts[0].split('\n'):
name, data = field.split(': ')
for pair in data.split(' or '):
mi, ma = pair.split('-')
ranges = fields.get(name, [])
ranges.extend(i for i in range(int(mi), int(ma)+1))
fields[name] = ranges
# Get my ticket.
_, data = texts[1].split('\n')
my_ticket = [int(d) for d in data.split(',')]
# Get the other tickets.
tickets = []
for ticket in texts[2].split('\n')[1:]:
tickets.append([int(t) for t in ticket.split(',')])
return fields, tickets, my_ticket
def sort_tickets(fields, tickets) -> tuple:
"""
Get the valid and invalid tickets.
"""
valid_numbers = set()
for f in fields.values():
valid_numbers.update(f)
valids, invalids = [], []
for ticket in tickets:
invalid = []
for n in ticket:
if n in valid_numbers: continue
invalid.append(n)
if invalid:
invalids.extend(invalid)
else:
valids.append(ticket)
return valids, invalids
def part1(fname: str) -> int:
"""Part 1.
Tests
>>> part1("./data/day16_test.txt")
71
"""
_, invalids = sort_tickets(*get_data(fname)[:2])
return sum(invalids)
def part2(fname: str) -> int:
"""Part 2.
This sucks. No test for now.
"""
fields, tickets, my_ticket = get_data(fname)
valids, invalids = sort_tickets(fields, tickets)
# If a field is valid, add it to a set of hypotheses
# *iff* it hasn't bene discarded before.
# If invalid, remove it from the hypotheses *forever*
# by adding it to the set of discards.
hypotheses = {k: set() for k in fields}
discards = {k: set() for k in fields}
for valid in valids:
for i, value in enumerate(valid):
for field, values in fields.items():
if value in values:
if i not in discards[field]:
hypotheses[field].add(i)
else:
hypotheses[field].discard(i)
discards[field].add(i)
# Sort the hypotheses into order, based on how many
# possibilities are in each field. Hopefully mono-
# tonically increasing.
hypotheses = {k:v for k, v in sorted(hypotheses.items(), key=lambda x: len(x[1]))}
# Now assign the certain fields in order. Each time
# we make an assignment, add the field to a list
# so we know what to ignore for future fields.
certain = {}
assigned = []
for field, hypos in hypotheses.items():
for assign in assigned:
hypos.discard(assign)
assert len(hypos) == 1
position, = hypos # Singleton set.
certain[field] = position
assigned.append(position)
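    # Hedged worked example of the elimination above (field names and indices
    # are made up): with hypotheses {'row': {2}, 'class': {1, 2}, 'seat': {0, 1, 2}}
    # the loop assigns row -> 2, then discards 2 so class -> 1, then discards
    # 1 and 2 so seat -> 0.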
# Now make the product for our ticket.
product = 1
for field, position in certain.items():
if field.startswith('departure'):
product *= my_ticket[position]
return product
if __name__ == "__main__":
import doctest
import sys
doctest.testmod(verbose=True)
fname = "./data/day16.txt"
print(f"Part 1 count: {part1(fname)}")
print(f"Part 2 product: {part2(fname)}")
|
the-stack_0_4832 | import asyncio
from decimal import Decimal
from os.path import join
from typing import Any, List, TYPE_CHECKING
import pandas as pd
import hummingbot.client.config.global_config_map as global_config
from hummingbot.client.config.config_helpers import missing_required_configs, save_to_yml
from hummingbot.client.config.config_validators import validate_bool, validate_decimal
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.security import Security
from hummingbot.client.settings import CONF_FILE_PATH, GLOBAL_CONFIG_PATH
from hummingbot.client.ui.interface_utils import format_df_for_printout
from hummingbot.client.ui.style import load_style
from hummingbot.core.utils import map_df_to_str
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.model.inventory_cost import InventoryCost
from hummingbot.strategy.perpetual_market_making import PerpetualMarketMakingStrategy
from hummingbot.strategy.pure_market_making import PureMarketMakingStrategy
from hummingbot.user.user_balances import UserBalances
if TYPE_CHECKING:
from hummingbot.client.hummingbot_application import HummingbotApplication
no_restart_pmm_keys_in_percentage = ["bid_spread", "ask_spread", "order_level_spread", "inventory_target_base_pct"]
no_restart_pmm_keys = ["order_amount",
"order_levels",
"filled_order_delay",
"inventory_skew_enabled",
"inventory_range_multiplier",
"price_ceiling",
"price_floor",
"moving_price_band_enabled",
"price_ceiling_pct",
"price_floor_pct",
"price_band_refresh_time"
]
global_configs_to_display = ["autofill_import",
"kill_switch_enabled",
"kill_switch_rate",
"telegram_enabled",
"telegram_token",
"telegram_chat_id",
"send_error_logs",
global_config.PMM_SCRIPT_ENABLED_KEY,
global_config.PMM_SCRIPT_FILE_PATH_KEY,
"ethereum_chain_name",
"gateway_enabled",
"gateway_cert_passphrase",
"gateway_api_host",
"gateway_api_port",
"rate_oracle_source",
"global_token",
"global_token_symbol",
"rate_limits_share_pct",
"create_command_timeout",
"other_commands_timeout",
"tables_format"]
color_settings_to_display = ["top-pane",
"bottom-pane",
"output-pane",
"input-pane",
"logs-pane",
"terminal-primary"]
class ConfigCommand:
def config(self, # type: HummingbotApplication
key: str = None,
value: str = None):
self.app.clear_input()
if key is None:
self.list_configs()
return
else:
if key not in self.config_able_keys():
self.notify("Invalid key, please choose from the list.")
return
safe_ensure_future(self._config_single_key(key, value), loop=self.ev_loop)
def list_configs(self, # type: HummingbotApplication
):
columns = ["Key", " Value"]
data = [[cv.key, cv.value] for cv in global_config.global_config_map.values()
if cv.key in global_configs_to_display and not cv.is_secure]
df = map_df_to_str(pd.DataFrame(data=data, columns=columns))
self.notify("\nGlobal Configurations:")
lines = [" " + line for line in format_df_for_printout(df, max_col_width=50).split("\n")]
self.notify("\n".join(lines))
data = [[cv.key, cv.value] for cv in global_config.global_config_map.values()
if cv.key in color_settings_to_display and not cv.is_secure]
df = map_df_to_str(pd.DataFrame(data=data, columns=columns))
self.notify("\nColor Settings:")
lines = [" " + line for line in format_df_for_printout(df, max_col_width=50).split("\n")]
self.notify("\n".join(lines))
if self.strategy_name is not None:
data = [[cv.printable_key or cv.key, cv.value] for cv in self.strategy_config_map.values() if not cv.is_secure]
df = map_df_to_str(pd.DataFrame(data=data, columns=columns))
self.notify("\nStrategy Configurations:")
lines = [" " + line for line in format_df_for_printout(df, max_col_width=50).split("\n")]
self.notify("\n".join(lines))
def config_able_keys(self # type: HummingbotApplication
) -> List[str]:
"""
        Returns a list of keys configurable via the config command, excluding exchange API keys,
        as those are set via the connect command.
"""
keys = [c.key for c in global_config.global_config_map.values() if c.prompt is not None and not c.is_connect_key]
if self.strategy_config_map is not None:
keys += [c.key for c in self.strategy_config_map.values() if c.prompt is not None]
return keys
async def check_password(self, # type: HummingbotApplication
):
password = await self.app.prompt(prompt="Enter your password >>> ", is_password=True)
if password != Security.password:
self.notify("Invalid password, please try again.")
return False
else:
return True
# Make this function static so unit testing can be performed.
@staticmethod
def update_running_mm(mm_strategy, key: str, new_value: Any):
if key in no_restart_pmm_keys_in_percentage:
setattr(mm_strategy, key, new_value / Decimal("100"))
return True
elif key in no_restart_pmm_keys:
setattr(mm_strategy, key, new_value)
return True
return False
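    # Hedged usage sketch (the strategy object and value are illustrative):
    #
    #     ConfigCommand.update_running_mm(mm_strategy, "bid_spread", Decimal("1"))
    #     # -> True, and mm_strategy.bid_spread is now Decimal("0.01"), since
    #     #    "bid_spread" is one of the percentage keys above.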
async def _config_single_key(self, # type: HummingbotApplication
key: str,
input_value):
"""
Configure a single variable only.
Prompt the user to finish all configurations if there are remaining empty configs at the end.
"""
self.placeholder_mode = True
self.app.hide_input = True
try:
config_var, config_map, file_path = None, None, None
if key in global_config.global_config_map:
config_map = global_config.global_config_map
file_path = GLOBAL_CONFIG_PATH
elif self.strategy_config_map is not None and key in self.strategy_config_map:
config_map = self.strategy_config_map
file_path = join(CONF_FILE_PATH, self.strategy_file_name)
config_var = config_map[key]
if input_value is None:
self.notify("Please follow the prompt to complete configurations: ")
if config_var.key == "inventory_target_base_pct":
await self.asset_ratio_maintenance_prompt(config_map, input_value)
elif config_var.key == "inventory_price":
await self.inventory_price_prompt(config_map, input_value)
else:
await self.prompt_a_config(config_var, input_value=input_value, assign_default=False)
if self.app.to_stop_config:
self.app.to_stop_config = False
return
await self.update_all_secure_configs()
missings = missing_required_configs(config_map)
if missings:
self.notify("\nThere are other configuration required, please follow the prompt to complete them.")
missings = await self._prompt_missing_configs(config_map)
save_to_yml(file_path, config_map)
self.notify("\nNew configuration saved:")
self.notify(f"{key}: {str(config_var.value)}")
self.app.app.style = load_style()
for config in missings:
self.notify(f"{config.key}: {str(config.value)}")
if isinstance(self.strategy, PureMarketMakingStrategy) or \
isinstance(self.strategy, PerpetualMarketMakingStrategy):
updated = ConfigCommand.update_running_mm(self.strategy, key, config_var.value)
if updated:
self.notify(f"\nThe current {self.strategy_name} strategy has been updated "
f"to reflect the new configuration.")
except asyncio.TimeoutError:
self.logger().error("Prompt timeout")
except Exception as err:
self.logger().error(str(err), exc_info=True)
finally:
self.app.hide_input = False
self.placeholder_mode = False
self.app.change_prompt(prompt=">>> ")
async def _prompt_missing_configs(self, # type: HummingbotApplication
config_map):
missings = missing_required_configs(config_map)
for config in missings:
await self.prompt_a_config(config)
if self.app.to_stop_config:
self.app.to_stop_config = False
return
if missing_required_configs(config_map):
return missings + (await self._prompt_missing_configs(config_map))
return missings
async def asset_ratio_maintenance_prompt(self, # type: HummingbotApplication
config_map,
input_value = None):
if input_value:
config_map['inventory_target_base_pct'].value = Decimal(input_value)
else:
exchange = config_map['exchange'].value
market = config_map["market"].value
base, quote = market.split("-")
balances = await UserBalances.instance().balances(exchange, base, quote)
if balances is None:
return
base_ratio = await UserBalances.base_amount_ratio(exchange, market, balances)
if base_ratio is None:
return
base_ratio = round(base_ratio, 3)
quote_ratio = 1 - base_ratio
base, quote = config_map["market"].value.split("-")
cvar = ConfigVar(key="temp_config",
prompt=f"On {exchange}, you have {balances.get(base, 0):.4f} {base} and "
f"{balances.get(quote, 0):.4f} {quote}. By market value, "
f"your current inventory split is {base_ratio:.1%} {base} "
f"and {quote_ratio:.1%} {quote}."
f" Would you like to keep this ratio? (Yes/No) >>> ",
required_if=lambda: True,
type_str="bool",
validator=validate_bool)
await self.prompt_a_config(cvar)
if cvar.value:
config_map['inventory_target_base_pct'].value = round(base_ratio * Decimal('100'), 1)
else:
if self.app.to_stop_config:
self.app.to_stop_config = False
return
await self.prompt_a_config(config_map["inventory_target_base_pct"])
async def inventory_price_prompt(
self, # type: HummingbotApplication
config_map,
input_value=None,
):
key = "inventory_price"
if input_value:
config_map[key].value = Decimal(input_value)
else:
exchange = config_map["exchange"].value
market = config_map["market"].value
base_asset, quote_asset = market.split("-")
if exchange.endswith("paper_trade"):
balances = global_config.global_config_map["paper_trade_account_balance"].value
else:
balances = await UserBalances.instance().balances(
exchange, base_asset, quote_asset
)
if balances.get(base_asset) is None:
return
cvar = ConfigVar(
key="temp_config",
prompt=f"On {exchange}, you have {balances[base_asset]:.4f} {base_asset}. "
f"What was the price for this amount in {quote_asset}? >>> ",
required_if=lambda: True,
type_str="decimal",
validator=lambda v: validate_decimal(
v, min_value=Decimal("0"), inclusive=True
),
)
await self.prompt_a_config(cvar)
config_map[key].value = cvar.value
try:
quote_volume = balances[base_asset] * cvar.value
except TypeError:
# TypeError: unsupported operand type(s) for *: 'decimal.Decimal' and 'NoneType' - bad input / no input
self.notify("Inventory price not updated due to bad input")
return
with self.trade_fill_db.get_new_session() as session:
with session.begin():
InventoryCost.add_volume(
session,
base_asset=base_asset,
quote_asset=quote_asset,
base_volume=balances[base_asset],
quote_volume=quote_volume,
overwrite=True,
)
|
the-stack_0_4834 | import tensorflow as tf
def main():
converter = tf.lite.TFLiteConverter.from_frozen_graph('../pb/frozen_shape_28.pb',
['new_input_node'], ['final_dense/MatMul'])
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
tflite_model = converter.convert()
with open("../tflite/model.lite", "wb") as f:
f.write(tflite_model)
if __name__ == '__main__':
main()
|
the-stack_0_4835 | import logging
import pickle
import random
from voxpopuli import Voice, PhonemeList
from typing import Union, Dict, List
from random import randint
from distance import levenshtein
from katalixia.tools import weighted_choice
class TreeNode:
def __init__(self):
self.children = dict() # type:Dict[str,Union[TreeNode, Leaf]]
self.leaves = list() # type:List[Leaf]
self.child_leaves_count = 0
def __getitem__(self, item):
return self.children[item]
@property
def total_leaves_count(self):
return len(self.leaves) + self.child_leaves_count
def insert(self, leaf: 'Leaf', current_pho_index):
try:
leaf_current_pho = leaf.phonemes[-current_pho_index]
        except IndexError:  # if this leaf has "no more" phonemes to unstack, it's stored on this node's leaves
self.leaves.append(leaf)
return
if leaf_current_pho not in self.children:
self.children[leaf_current_pho] = leaf
else:
current_child = self.children[leaf_current_pho]
if isinstance(current_child, Leaf): # creating the new node
new_node = TreeNode()
new_node.insert(current_child, current_pho_index + 1)
new_node.insert(leaf, current_pho_index + 1)
self.children[leaf_current_pho] = new_node
elif isinstance(current_child, TreeNode):
current_child.insert(leaf, current_pho_index + 1)
self.child_leaves_count += 1
def find_random(self):
if self.leaves and (randint(0, self.child_leaves_count + len(self.leaves)) >= self.child_leaves_count
or not self.children):
return random.choice(self.leaves)
else:
children_list, weights = zip(*[(child, child.total_leaves_count) for child in self.children.values()])
rnd_child = weighted_choice(children_list, weights)
return rnd_child.find_random()
def find(self, phoneme_list: PhonemeList, original_string : str):
"""Recursively, through the tree, tries to find a good rhyme that is *not* equal to the input word
(here passed as an argument in original string"""
if not phoneme_list:
return self.find_random()
current_pho = phoneme_list.pop()
if current_pho in self.children:
current_child = self.children[current_pho]
curr_child_output = current_child.find(phoneme_list, original_string)
if curr_child_output is not None:
return curr_child_output
rnd_child = self.find_random()
if isinstance(rnd_child, Leaf) and levenshtein(seq1=original_string, seq2=rnd_child.text) <= 2:
return None
else:
return rnd_child #nothing worked
def to_dict(self):
return {"children": {pho: child.to_dict() for pho, child in self.children.items()},
"leaves": [leaf.text for leaf in self.leaves]}
class RhymeTree(TreeNode):
def __init__(self, rhyming_lang="fr"):
super().__init__()
self.voice = Voice(lang=rhyming_lang)
self.children = dict() # type:Dict[str,Union[TreeNode, Leaf]]
def insert_rhyme(self, rhyme_string, data=None):
new_leaf = Leaf.from_string(rhyme_string.strip(), self.voice)
if new_leaf is not None:
if data is not None:
new_leaf.data = data
self.insert(new_leaf, 1)
else:
logging.warning("Word '%s' returned empty phoneme" % rhyme_string)
def find_rhyme(self, string):
string_phonemes = Leaf.clean_silences([pho.name for pho in self.voice.to_phonemes(string)])
current_pho = string_phonemes.pop()
if current_pho not in self.children:
return None
else:
return self.children[current_pho].find(string_phonemes, string)
def save(self, filepath):
with open(filepath, "wb") as picklefile:
pickle.dump(self, picklefile)
@classmethod
def from_pickle(cls, pickle_filepath):
with open(pickle_filepath, "rb") as picklefile:
return pickle.load(picklefile)
@classmethod
def from_text_file(cls, textfile_filepath, lang="fr", separator=None):
separator = separator if separator is not None else "\n"
with open(textfile_filepath) as file:
all_strings = file.read().split(separator)
return cls.from_word_list(all_strings, lang)
@classmethod
def from_word_list(cls, input_list, lang="fr"):
tree = cls(lang)
for string in input_list:
tree.insert_rhyme(string)
return tree
def to_dict(self):
return {pho : child.to_dict() for pho, child in self.children.items()}
class Leaf:
def __init__(self, string, phonemic_form):
self.text = string
self.phonemes = phonemic_form # type:List[str]
self.total_leaves_count = 1 # here for recursion in the tree
self.data = None
def __repr__(self):
return "Leaf( %s )" % self.text
def __str__(self):
return self.text
@staticmethod
def clean_silences(phoneme_list):
while phoneme_list and phoneme_list[-1] == "_":
phoneme_list.pop()
return phoneme_list
@classmethod
def from_string(cls, string, voxpopuli_voice):
phonemes_list = [pho.name for pho in voxpopuli_voice.to_phonemes(string)]
try:
return cls(string, cls.clean_silences(phonemes_list))
except IndexError:
return None
def to_dict(self):
return self.text
def find(self, phoneme_list: PhonemeList, original_string : str):
return self if levenshtein(seq1=original_string, seq2=self.text) >= 2 else None
def find_random(self):
return self |
the-stack_0_4836 | import asyncio
from h2client.diskcached_connection import DiskcachedConnection
import io
import time
USER_AGENT = 'H2ClientExamples/1 by /u/Tjstretchalot (+https://github.com/tjstretchalot/h2client)'
async def main():
dc_conn = DiskcachedConnection('postman-echo.com')
print('Performing a GET request')
out = io.BytesIO()
start_time = time.time()
headers = await dc_conn.get(
'/get',
{
'user-agent': USER_AGENT,
'accept': 'application/json'
},
out
)
total_time = time.time() - start_time
print(f'Finished GET request in {total_time} seconds')
print('Headers:')
for key, val in headers.items():
print(f' {key}: {val}')
print()
print('Body:')
pretty_body = out.getvalue().decode('utf-8')
print(pretty_body)
await dc_conn.close()
if __name__ == '__main__':
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(main())
pending = asyncio.all_tasks(loop)
while pending:
loop.run_until_complete(asyncio.wait(pending, return_when=asyncio.ALL_COMPLETED))
pending = asyncio.all_tasks(loop)
|
the-stack_0_4837 | """Useful mocks for unit testing."""
from __future__ import absolute_import, unicode_literals
import numbers
from datetime import datetime, timedelta
try:
from case import Mock
except ImportError:
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
def TaskMessage(
name, # type: str
id=None, # type: str
args=(), # type: Sequence
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
chain=None, # type: Sequence[Signature]
shadow=None, # type: str
utc=None, # type: bool
**options # type: Any
):
# type: (...) -> Any
"""Create task message in protocol 2 format."""
kwargs = {} if not kwargs else kwargs
from celery import uuid
from kombu.serialization import dumps
id = id or uuid()
message = Mock(name='TaskMessage-{0}'.format(id))
message.headers = {
'id': id,
'task': name,
'shadow': shadow,
}
embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain}
message.headers.update(options)
message.content_type, message.content_encoding, message.body = dumps(
(args, kwargs, embed), serializer='json',
)
message.payload = (args, kwargs, embed)
return message
def TaskMessage1(
name, # type: str
id=None, # type: str
args=(), # type: Sequence
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
    chain=None,  # type: Sequence[Signature]
**options # type: Any
):
# type: (...) -> Any
"""Create task message in protocol 1 format."""
kwargs = {} if not kwargs else kwargs
from celery import uuid
from kombu.serialization import dumps
id = id or uuid()
message = Mock(name='TaskMessage-{0}'.format(id))
message.headers = {}
message.payload = {
'task': name,
'id': id,
'args': args,
'kwargs': kwargs,
'callbacks': callbacks,
'errbacks': errbacks,
}
message.payload.update(options)
message.content_type, message.content_encoding, message.body = dumps(
message.payload,
)
return message
def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage):
# type: (Celery, Signature, bool, Any) -> Any
"""Create task message from :class:`celery.Signature`.
Example:
>>> m = task_message_from_sig(app, add.s(2, 2))
>>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey')
"""
sig.freeze()
callbacks = sig.options.pop('link', None)
errbacks = sig.options.pop('link_error', None)
countdown = sig.options.pop('countdown', None)
if countdown:
eta = app.now() + timedelta(seconds=countdown)
else:
eta = sig.options.pop('eta', None)
if eta and isinstance(eta, datetime):
eta = eta.isoformat()
expires = sig.options.pop('expires', None)
if expires and isinstance(expires, numbers.Real):
expires = app.now() + timedelta(seconds=expires)
if expires and isinstance(expires, datetime):
expires = expires.isoformat()
return TaskMessage(
sig.task, id=sig.id, args=sig.args,
kwargs=sig.kwargs,
callbacks=[dict(s) for s in callbacks] if callbacks else None,
errbacks=[dict(s) for s in errbacks] if errbacks else None,
eta=eta,
expires=expires,
utc=utc,
**sig.options
)
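# --- Illustrative usage sketch (not part of the original module) ---
# The factories above build kombu-style message mocks directly; the task name and
# arguments below are examples only.
#
#   msg = TaskMessage('proj.tasks.add', args=(2, 2), kwargs={})
#   assert msg.headers['task'] == 'proj.tasks.add'
#   msg_v1 = TaskMessage1('proj.tasks.add', args=(2, 2), kwargs={})
#   assert msg_v1.payload['args'] == (2, 2)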
|
the-stack_0_4838 | #!/usr/bin/env python
# encoding: utf-8
import os
import six
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.interp
import pfp.utils
import utils
class TestCompatStrings(utils.PfpTestCase):
def setUp(self):
pfp.interp.Endian.current = pfp.interp.Endian.BIG
def tearDown(self):
pass
def test_strlen(self):
dom = self._test_parse_build(
"",
"""
Printf("%d.%d.%d", Strlen("HELLO"), Strlen("abcd"), Strlen("abc"));
""",
stdout="5.4.3",
)
def test_substr(self):
dom = self._test_parse_build(
"",
"""
Printf("%s\\n", SubStr("Hello there", 0, 5));
string local someString = "abcdefg";
Printf("%s", SubStr(someString, 3));
""",
stdout="Hello\ndefg",
)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_4840 | #!/usr/bin/env python2
#-*- coding:utf-8 -*-
from docopt import docopt
from routine import parse_char
from charset import get_charset
class ArgError(Exception):
pass
def parse_parameters(doc, version):
p = docopt(doc, version=version)
p = {k.lstrip("-"): v for k, v in p.items()}
try:
return {
"input_is_hex": bool(p["hex"]),
"max_key_length": int(p["max-keylen"]),
"known_key_length": int(p["key-length"]) if p["key-length"] else None,
"most_frequent_char": parse_char(p["char"]) if p["char"] else None,
"brute_chars": bool(p["brute-chars"]),
"brute_printable": bool(p["brute-printable"]),
"text_charset": get_charset(p["text-charset"]),
"frequency_spread": 0, # to be removed
"filename": p["FILE"] if p["FILE"] else "-", # stdin by default
"filter_output": bool(p["filter-output"]),
}
except ValueError as err:
raise ArgError(str(err))
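# --- Illustrative usage sketch (not part of the original module) ---
# parse_parameters() expects a docopt usage string and a version, e.g.:
#
#   params = parse_parameters(__doc__, version="1.0")
#
# The usage string must define the options read above: --hex, --max-keylen,
# --key-length, --char, --brute-chars, --brute-printable, --text-charset,
# --filter-output and the positional FILE argument.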
|
the-stack_0_4842 | """
The Proxy is a structural design pattern whose intent is
to provide a surrogate object that acts as if it were the
real object the client code would like to use.
The proxy receives the requests and has control over how
and when to forward those requests to the real object.
Based on how proxies are used, we classify them as:
- Virtual Proxy: controls access to resources that may be
expensive to create or use.
- Remote Proxy: controls access to resources that live on
remote servers.
- Protection Proxy: controls access to resources that may
require authentication or permission.
- Smart Proxy: besides controlling access to the real
object, it also performs additional tasks to decide when
and how to execute certain actions.
Proxies can do many different things: create logs,
authenticate users, distribute services, cache results,
create and destroy objects, defer execution, and much more...
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from time import sleep
from typing import Dict, List
class IUser(ABC):
"""Subject Interface"""
firstname: str
lastname: str
@abstractmethod
def get_addresses(self) -> List[Dict]:
pass
@abstractmethod
def get_all_user_data(self) -> Dict:
pass
class RealUser(IUser):
"""Real Subject"""
def __init__(self, firstname: str, lastname: str) -> None:
        sleep(2)  # Simulating a request
self.firstname = firstname
self.lastname = lastname
def get_addresses(self) -> List[Dict]:
        sleep(2)  # Simulating a request
return [{"rua": "Av. Brasil", "numero": 500}]
def get_all_user_data(self) -> Dict:
        sleep(2)  # Simulating a request
return {"cpf": "111.111.111-11", "rg": "AB111222444"}
class UserProxy(IUser):
"""Proxy"""
def __init__(self, firstname: str, lastname: str) -> None:
self.firstname = firstname
self.lastname = lastname
        # These objects do not exist yet
        # at this point in the code
self._real_user: RealUser
self._cached_addresses: List[Dict]
self._all_user_data: Dict
def get_real_user(self) -> None:
if not hasattr(self, "_real_user"):
self._real_user = RealUser(self.firstname, self.lastname)
def get_addresses(self) -> List[Dict]:
self.get_real_user()
if not hasattr(self, "_cached_addresses"):
self._cached_addresses = self._real_user.get_addresses()
return self._cached_addresses
def get_all_user_data(self) -> Dict:
self.get_real_user()
if not hasattr(self, "_all_user_data"):
self._all_user_data = self._real_user.get_all_user_data()
return self._all_user_data
if __name__ == "__main__":
luiz = UserProxy("Luiz", "Otávio")
    # Responds instantly
print(luiz.firstname)
print(luiz.lastname)
    # Takes 6 seconds to respond because it comes from the real subject
print(luiz.get_all_user_data())
print(luiz.get_addresses())
    # Responds instantly (because it is cached)
print("CACHED DATA:")
for i in range(50):
print(luiz.get_addresses())
|
the-stack_0_4843 | """
Add token.client_id column
Revision ID: c36369fe730f
Revises: e15e47228c43
Create Date: 2016-10-19 15:24:13.387546
"""
from __future__ import unicode_literals
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = 'c36369fe730f'
down_revision = 'e15e47228c43'
def upgrade():
op.add_column('token', sa.Column(
'authclient_id',
postgresql.UUID(),
sa.ForeignKey('authclient.id', ondelete='cascade'),
nullable=True,
))
def downgrade():
op.drop_column('token', 'authclient_id')
|
the-stack_0_4844 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
## import sys
## sys.path.insert(0, "/home/scott/Dropbox/codes/pyrotein")
## sys.path.insert(0, "/Users/scott/Dropbox/codes/pyrotein")
import os
import numpy as np
import pyrotein as pr
from loaddata import load_xlsx, label_TMs
from display import plot_dmat
import multiprocessing as mp
# [[[ OBTAIN THE CONSENSUS SEQUENCE ]]]
# Read the sequence alignment result...
# [WARNING] !!!sequence alignment is not trustworthy
fl_aln = 'seq.align.fasta'
seq_dict = pr.fasta.read(fl_aln)
# Obtain the consensus sequence (super seq)...
tally_dict = pr.fasta.tally_resn_in_seqs(seq_dict)
super_seq = pr.fasta.infer_super_seq(tally_dict)
# [[[ FIND SIZE OF DISTANCE MATRIX ]]]
# Get the sequence index (alignment) on the n-term side...
nseqi = pr.fasta.get_lseqi(super_seq)
# User defined range...
nterm, cterm = 1, 322
len_seg = cterm - nterm + 1
super_seg = super_seq[nseqi : nseqi + len_seg]
# [[[ ANALYZE PDB ENTRIES ]]]
# Specify chains to process...
fl_chain = "chains.comp.xlsx"
lines = load_xlsx(fl_chain, sheet = "Sheet1")
drc = "pdb"
drc_dmat = "dmats.full"
pal = '''
set palette negative defined ( \
0 '#D53E4F',\
1 '#F46D43',\
2 '#FDAE61',\
3 '#FEE08B',\
4 '#E6F598',\
5 '#ABDDA4',\
6 '#66C2A5',\
7 '#3288BD' )
'''
for i_fl, line in enumerate(lines[-1:]):
# Unpack parameters
_, pdb, chain, _ = line[:4]
# Read coordinates from a PDB file...
fl_pdb = f"{pdb}.pdb"
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
# Create a lookup table for this pdb...
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
# Build a list of (resi, resn, atom)...
label_list = pr.utils.label_dmat(super_seg, nterm, cterm)
# Print the labels...
fl_dmat = os.path.join(drc_dmat, f"{pdb}.{chain}.dmat")
dist_list = pr.utils.read_file(f"{fl_dmat}.dat", numerical = True)
for (x, y, _) in dist_list:
print(f"{label_list[int(x)]}, {label_list[int(y)]}")
|
the-stack_0_4846 | from __future__ import division
import torch
import numpy as np
import os.path as osp
from mmcv.runner import load_checkpoint
from mmcv.parallel import MMDataParallel
from vegcn.datasets import build_dataset
from vegcn.deduce import peaks_to_labels
from lgcn.datasets import build_dataloader
from utils import (list2dict, write_meta, mkdir_if_no_exists, Timer)
from evaluation import evaluate, accuracy
def output_accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def test(model, dataset, cfg, logger):
if cfg.load_from:
print('load from {}'.format(cfg.load_from))
load_checkpoint(model, cfg.load_from, strict=True, logger=logger)
losses = []
accs = []
pred_conns = []
max_lst = []
multi_max = []
if cfg.gpus == 1:
data_loader = build_dataloader(dataset,
cfg.batch_size_per_gpu,
cfg.workers_per_gpu,
train=False)
size = len(data_loader)
model = MMDataParallel(model, device_ids=range(cfg.gpus))
if cfg.cuda:
model.cuda()
model.eval()
for i, data in enumerate(data_loader):
with torch.no_grad():
output, loss = model(data, return_loss=True)
if not dataset.ignore_label:
labels = data[2].view(-1)
if not cfg.regressor:
acc = output_accuracy(output, labels)
accs += [acc.item()]
losses += [loss.item()]
if not cfg.regressor:
output = output[:, 1]
if cfg.max_conn == 1:
output_max = output.max()
pred = (output == output_max).nonzero().view(-1)
pred_size = len(pred)
if pred_size > 1:
multi_max.append(pred_size)
pred_i = np.random.choice(np.arange(pred_size))
else:
pred_i = 0
pred = [int(pred[pred_i].detach().cpu().numpy())]
max_lst.append(output_max.detach().cpu().numpy())
elif cfg.max_conn > 1:
output = output.detach().cpu().numpy()
pred = output.argpartition(cfg.max_conn)[:cfg.max_conn]
pred_conns.append(pred)
if i % cfg.log_config.interval == 0:
if dataset.ignore_label:
logger.info('[Test] Iter {}/{}'.format(i, size))
else:
logger.info('[Test] Iter {}/{}: Loss {:.4f}'.format(
i, size, loss))
else:
raise NotImplementedError
if not dataset.ignore_label:
avg_loss = sum(losses) / len(losses)
logger.info('[Test] Overall Loss {:.4f}'.format(avg_loss))
if not cfg.regressor:
avg_acc = sum(accs) / len(accs)
logger.info('[Test] Overall Accuracy {:.4f}'.format(avg_acc))
if size > 0:
logger.info('max val: mean({:.2f}), max({:.2f}), min({:.2f})'.format(
sum(max_lst) / size, max(max_lst), min(max_lst)))
multi_max_size = len(multi_max)
if multi_max_size > 0:
logger.info('multi-max({:.2f}): mean({:.1f}), max({}), min({})'.format(
1. * multi_max_size / size,
sum(multi_max) / multi_max_size, max(multi_max), min(multi_max)))
return np.array(pred_conns)
def test_gcn_e(model, cfg, logger):
for k, v in cfg.model['kwargs'].items():
setattr(cfg.test_data, k, v)
dataset = build_dataset(cfg.model['type'], cfg.test_data)
pred_peaks = dataset.peaks
pred_dist2peak = dataset.dist2peak
ofn_pred = osp.join(cfg.work_dir, 'pred_conns.npz')
if osp.isfile(ofn_pred) and not cfg.force:
data = np.load(ofn_pred)
pred_conns = data['pred_conns']
inst_num = data['inst_num']
if inst_num != dataset.inst_num:
logger.warn(
'instance number in {} is different from dataset: {} vs {}'.
format(ofn_pred, inst_num, len(dataset)))
else:
if cfg.random_conns:
pred_conns = []
for nbr, dist, idx in zip(dataset.subset_nbrs,
dataset.subset_dists,
dataset.subset_idxs):
for _ in range(cfg.max_conn):
pred_rel_nbr = np.random.choice(np.arange(len(nbr)))
pred_abs_nbr = nbr[pred_rel_nbr]
pred_peaks[idx].append(pred_abs_nbr)
pred_dist2peak[idx].append(dist[pred_rel_nbr])
pred_conns.append(pred_rel_nbr)
pred_conns = np.array(pred_conns)
else:
pred_conns = test(model, dataset, cfg, logger)
for pred_rel_nbr, nbr, dist, idx in zip(pred_conns,
dataset.subset_nbrs,
dataset.subset_dists,
dataset.subset_idxs):
pred_abs_nbr = nbr[pred_rel_nbr]
pred_peaks[idx].extend(pred_abs_nbr)
pred_dist2peak[idx].extend(dist[pred_rel_nbr])
inst_num = dataset.inst_num
if len(pred_conns) > 0:
logger.info(
'pred_conns (nbr order): mean({:.1f}), max({}), min({})'.format(
pred_conns.mean(), pred_conns.max(), pred_conns.min()))
if not dataset.ignore_label and cfg.eval_interim:
subset_gt_labels = dataset.subset_gt_labels
for i in range(cfg.max_conn):
pred_peaks_labels = np.array([
dataset.idx2lb[pred_peaks[idx][i]]
for idx in dataset.subset_idxs
])
acc = accuracy(pred_peaks_labels, subset_gt_labels)
logger.info(
'[{}-th] accuracy of pred_peaks labels ({}): {:.4f}'.format(
i, len(pred_peaks_labels), acc))
# the rule for nearest nbr is only appropriate when nbrs is sorted
nearest_idxs = np.where(pred_conns[:, i] == 0)[0]
acc = accuracy(pred_peaks_labels[nearest_idxs],
subset_gt_labels[nearest_idxs])
logger.info(
'[{}-th] accuracy of pred labels (nearest: {}): {:.4f}'.format(
i, len(nearest_idxs), acc))
not_nearest_idxs = np.where(pred_conns[:, i] > 0)[0]
acc = accuracy(pred_peaks_labels[not_nearest_idxs],
subset_gt_labels[not_nearest_idxs])
logger.info(
'[{}-th] accuracy of pred labels (not nearest: {}): {:.4f}'.
format(i, len(not_nearest_idxs), acc))
with Timer('Peaks to clusters (th_cut={})'.format(cfg.tau)):
pred_labels = peaks_to_labels(pred_peaks, pred_dist2peak, cfg.tau,
inst_num)
if cfg.save_output:
logger.info(
'save predicted connectivity and labels to {}'.format(ofn_pred))
if not osp.isfile(ofn_pred) or cfg.force:
np.savez_compressed(ofn_pred,
pred_conns=pred_conns,
inst_num=inst_num)
# save clustering results
idx2lb = list2dict(pred_labels, ignore_value=-1)
folder = '{}_gcne_k_{}_th_{}_ig_{}'.format(cfg.test_name, cfg.knn,
cfg.th_sim,
cfg.test_data.ignore_ratio)
opath_pred_labels = osp.join(cfg.work_dir, folder,
'tau_{}_pred_labels.txt'.format(cfg.tau))
mkdir_if_no_exists(opath_pred_labels)
write_meta(opath_pred_labels, idx2lb, inst_num=inst_num)
# evaluation
if not dataset.ignore_label:
print('==> evaluation')
for metric in cfg.metrics:
evaluate(dataset.gt_labels, pred_labels, metric)
|
the-stack_0_4847 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from knack.util import CLIError
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, record_only)
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
@record_only()
class CustomDomainTests(ScenarioTest):
def test_bind_cert_to_domain(self):
self.kwargs.update({
'cert': 'test-cert',
'keyVaultUri': 'https://integration-test-prod.vault.azure.net/',
'KeyVaultCertName': 'cli-unittest',
'domain': 'cli.asc-test.net',
'app': 'test-app',
'serviceName': 'cli-unittest',
'rg': 'cli'
})
self.cmd('spring-cloud certificate add --name {cert} --vault-uri {keyVaultUri} --vault-certificate-name {KeyVaultCertName} -g {rg} -s {serviceName}', checks=[
self.check('name', '{cert}')
])
self.cmd('spring-cloud certificate show --name {cert} -g {rg} -s {serviceName}', checks=[
self.check('name', '{cert}')
])
result = self.cmd('spring-cloud certificate list -g {rg} -s {serviceName}').get_output_in_json()
self.assertTrue(len(result) > 0)
self.cmd('spring-cloud app custom-domain bind --domain-name {domain} --app {app} -g {rg} -s {serviceName}', checks=[
self.check('name', '{domain}')
])
self.cmd('spring-cloud app custom-domain show --domain-name {domain} --app {app} -g {rg} -s {serviceName}', checks=[
self.check('name', '{domain}'),
self.check('properties.appName', '{app}')
])
result = self.cmd('spring-cloud app custom-domain list --app {app} -g {rg} -s {serviceName}').get_output_in_json()
self.assertTrue(len(result) > 0)
self.cmd('spring-cloud app custom-domain update --domain-name {domain} --certificate {cert} --app {app} -g {rg} -s {serviceName}', checks=[
self.check('name', '{domain}'),
self.check('properties.appName', '{app}'),
self.check('properties.certName', '{cert}')
])
self.cmd('spring-cloud app custom-domain unbind --domain-name {domain} --app {app} -g {rg} -s {serviceName}')
self.cmd('spring-cloud app custom-domain show --domain-name {domain} --app {app} -g {rg} -s {serviceName}', expect_failure=True)
self.cmd('spring-cloud certificate remove --name {cert} -g {rg} -s {serviceName}')
self.cmd('spring-cloud certificate show --name {cert} -g {rg} -s {serviceName}', expect_failure=True)
|
the-stack_0_4850 | """Tests for Brother Printer integration."""
import json
from homeassistant.components.brother.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_TYPE
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
async def init_integration(hass) -> MockConfigEntry:
"""Set up the Brother integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
title="HL-L2340DW 0123456789",
unique_id="0123456789",
data={CONF_HOST: "localhost", CONF_TYPE: "laser"},
)
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
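# --- Illustrative usage sketch (not part of the original module) ---
# A test would typically call the helper like this (the test body below is only an
# example; the exact entity ids depend on the integration's sensors):
#
#   async def test_async_setup_entry(hass):
#       entry = await init_integration(hass)
#       assert entry.data[CONF_HOST] == "localhost"
#       assert hass.config_entries.async_entries(DOMAIN)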
|
the-stack_0_4851 | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor ([email protected])"""
from tinycss import make_parser
from pprint import pprint as ppr
from reflector import Reflector
from string import ascii_lowercase
DEBUG = __name__ == '__main__'
class HTMLReflector(Reflector):
def __init__(self, default_tag='div', newlines_and_spaces=False):
self.selectors = set()
self.parser = make_parser('page3')
self.newlines_and_spaces = newlines_and_spaces
self.default_tag = default_tag
self.css = None
def __str__(self):
ppr(self.selectors)
return ''
def process_string(self, css_string):
"""Parse stylesheet with tinycss."""
self.css = self.parser.parse_stylesheet_bytes(css_string)
return self
def process(self, filename):
"""Parse stylesheet file with tinycss."""
self.css = self.parser.parse_stylesheet_file(filename)
return self
def extract(self):
"""Extracts css document into a dictionary grouped
by ids and classes for later use. CSS nesting and relationships
remain intact."""
for rule in self.css.rules:
try:
sels = rule.selector.as_css().split(',')
for sel in set(sels):
self.selectors.add(sel)
except AttributeError:
print('Error: Selector `{}` is not valid'.format(sel))
continue
return self
def _get_id(self, piece):
"""Get the id of the piece, if it's at the beginning,
or somewhere in between."""
if '#' in piece:
if piece.startswith('#'):
piece = piece[1:]
# If this is a chained selector, stop before the next token
end = piece.find('.') if piece.find('.') != -1 else len(piece)
return ' id="{}"'.format(piece[:end].replace('#', ' '))
else:
return ''
def _get_class(self, piece):
"""Get the class of the piece, if it's at the beginning,
or somewhere in between."""
if '.' in piece:
if piece.startswith('.'):
piece = piece[1:]
# If this is a chained selector, stop before the next token
end = piece.find('#') if piece.find('#') != -1 else len(piece)
return ' class="{}"'.format(piece[:end].replace('.', ' '))
else:
return ''
def _is_tag(self, piece):
"""Check if it's an actual html, e.g. `div`, `em`"""
return piece[0] in ascii_lowercase
def _get_tag(self, piece):
"""Return the html tag if it has no id/class selectors,
otherwise, get the substring that only contains the html tag."""
if self._is_tag(piece):
pos = piece.find('#')
if pos == -1:
pos = piece.find('.')
if pos == -1:
return piece
return piece[:pos]
else:
return self.default_tag
def _get_attributes(self, piece):
if '#' in piece and not piece.startswith('#'):
start = piece.find('#')
id = self._get_id(piece[start:])
classes = self._get_class(piece)
elif '.' in piece and not piece.startswith('.'):
id = self._get_id(piece)
start = piece.find('.')
classes = self._get_class(piece[start:])
else:
id = self._get_id(piece)
classes = self._get_class(piece)
tag = self._get_tag(piece)
return tag, id, classes
def _get_pieces(self, selector):
pieces = [x.strip() for x in selector.split('>')]
for k, piece in enumerate(pieces):
if ' ' in piece:
for token in reversed(piece.split(' ')):
pieces.insert(k, token)
pieces.remove(piece)
return pieces
def _create_tag(self, selector):
if ':' in selector:
return ''
html = ''
pieces = self._get_pieces(selector)
for k, piece in enumerate(pieces):
tag, id, classes = self._get_attributes(piece)
space = k * (' ' * 4) if self.newlines_and_spaces else ''
            html += '{space}<{tag}{id}{classes}>'.format(
                space=space, id=id, classes=classes, tag=tag)
if self.newlines_and_spaces:
html += '\n'
# To build the nested html, we need to loop over them in reverse,
# to make sure we get the corresponding selector/html tag
_k = len(pieces)
for piece in reversed(pieces):
tag = self._get_tag(piece) if self._is_tag(piece) \
else self.default_tag
space = _k * (' ' * 4) if self.newlines_and_spaces else ''
html += '{space}</{tag}>'.format(space=space, tag=tag)
if self.newlines_and_spaces:
html += '\n'
_k -= 1
return html
def make_html(self, output=None, save_as_string=False):
"""Build out and write the actual HTML document."""
out = ''
for selector in self.selectors:
out += self._create_tag(selector)
if save_as_string:
return out
if not output.endswith('.html'):
            raise ValueError('{} is not a valid html file.'.format(output))
        with open(output, 'w+') as newfile:
newfile.write(out)
return self
if DEBUG:
reflector = HTMLReflector(newlines_and_spaces=True)
reflector.process('animate.css').extract().make_html(output='output.html')
|
the-stack_0_4853 | # -*- coding: utf8 -*-
from QcloudApi.qcloudapi import QcloudApi
from tce.tcloud.utils.config import global_config
# Set the module to load
module = 'lb'
# Action name of the API; see the wiki documentation for the action name of each API
action = 'RegisterInstancesWithForwardLBFourthListener'
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain =params['domain']
# Common parameters of the cloud API
config = {
'Region': region,
'secretId': secretId,
'secretKey': secretKey,
'method': 'GET',
'SignatureMethod': 'HmacSHA1'
}
# API parameters; fill in according to your actual needs. JSON values are supported,
# e.g. an array: "ArrayExample": ["1","2","3"]
# e.g. a dict:   "DictExample": {"key1": "value1", "key2": "values2"}
action_params = {
'loadBalancerId':'lb-0wqe13pg',
'listenerId':'lbl-rvfpnndw',
'locationIds.0':'loc-aaa',
'backends.0.instanceId':'ins-1234test',
'backends.0.port':80,
'backends.0.weight':10,
'backends.1.instanceId':'ins-5678test',
'backends.1.port':80,
'backends.1.weight':6
}
try:
service = QcloudApi(module, config)
    # Before sending, the request's secretId/secretKey/Region/method/SignatureMethod
    # parameters can be reset through the corresponding setter methods, e.g. reset the Region:
    # service.setRegion('shanghai')
    # Print the generated request URL without sending the request
print(service.generateUrl(action, action_params))
    # Call the API, send the request and print the response
print(service.call(action, action_params))
except Exception as e:
import traceback
print('traceback.format_exc():\n%s' % traceback.format_exc()) |
the-stack_0_4854 | import pandas as pd
'''
@test($$;type(pd))
@alt(全ての|すべての|全)
@alt(の名前|名)
@alt(丸める|四捨五入する)
@alt(丸めて|四捨五入して)
@prefix(df;データフレーム)
@prefix(ds;データ列)
@prefix(col;カラム;カラム)
@alt(日付データ|タイムスタンプ[型|]|Pandasの日付型|datetime64型)
@prefix(value;[文字列|日付|])
データ列を使う
データ列をインポートする
'''
pd.to_datetime(x)
'''
@test(pd=df=ds=missing;$$)
[Pandasで、|]xを日付データに変換する
'''
__X__ = df['A']
pd.to_datetime(__X__)
'''
@test(pd=df=ds=missing;$$)
@X(df[col];ds;s)
@Y(dfのcoll;ds;s)
[Pandasで、|]__Y__を日付データに変換する
'''
pd.to_datetime(__X__, format='%Y-%m-%d')
'''
@test(pd=df=ds=missing;$$)
@alt(フォーマット|書式)
[Pandasで、|]{フォーマットで_|__Y__を}日付データに変換する
'''
pd.to_datetime(__X__, format=fmt)
'''
@test(pd=df=ds=missing;fmt='%Y';$$)
[Pandasで、|]{フォーマットfmtで_|__Y__を}日付データに変換する
'''
# Epoch seconds (Unix time)
pd.to_datetime(__X__, unit='s', utc=True)
'''
@test(pd=df=ds=missing;$$)
@alt(エポック秒|UNIX秒|UNIX時間|数値時刻)
[Pandasで、|]エポック秒の__Y__から日付データに変換する
[Pandasで、|]__Y__のエポック秒から日付データに変換する
'''
__X__.tz_convert('Asia/Tokyo')
'''
@X(df[col]|ds)
@Y(dfのcol|ds)
@test(pd=df=ds=missing;$$)
__Y__のタイムゾーンを[日本|東京]に設定する
'''
__X__.tz_convert(s)
'''
@test(pd=df=ds=missing;$$)
__Y__のタイムゾーンをsに設定する
'''
df.set_index(col, inplace=True)
'''
@test(pd=df=ds=missing;$$)
[Pandasで、|]dfのcolをインデックスにする
'''
df.index = pd.DatetimeIndex(__X__)
'''
@test(pd=df=ds=missing;$$;df.index)
[Pandasで、|]日付データの__Y__を[dfの|]インデックスにする
'''
df.index = pd.DatetimeIndex(pd.to_datetime(__X__))
'''
@test(pd=df=ds=missing;$$;df.index)
[Pandasで、|]__Y__を日付データに変換し、[dfの|]インデックスにする
'''
__X__.dt.year
'''
@test(pd=df=ds=missing;$$)
__Y__の年[|を得る]
__Y__が_何年か見る
'''
__X__.dt.month
'''
@test(pd=df=ds=missing;$$)
__Y__の月[|を得る]
__Y__が_何月か見る
'''
__X__.dt.day
'''
@test(pd=df=ds=missing;$$)
__Y__の[日|日にち][|を得る]
__Y__が_何日か見る
'''
__X__.dt.hour
'''
@test(pd=df=ds=missing;$$)
__Y__の[時|時刻][|を得る]
__Y__が_何時か見る
'''
__X__.dt.minute
'''
@test(pd=df=ds=missing;$$)
__Y__の分[|を得る]
__Y__が_何分か見る
'''
__X__.dt.second
'''
@test(pd=df=ds=missing;$$)
__Y__の秒[|を得る]
__Y__が_何秒か見る
'''
__X__.dt.weekday_name
'''
@test(pd=df=ds=missing;$$)
__Y__の曜日[の名前|][|を得る]
__Y__が_何曜日か見る
'''
__X__.dt.dayofweek
'''
@test(pd=df=ds=missing;$$)
__Y__の曜日数[|を得る]
__Y__の曜日が_何日目か見る
'''
|
the-stack_0_4855 | import discord, mtranslate
from discord.ext import commands
from contextlib import redirect_stdout
import inspect, aiohttp, asyncio, io, textwrap, traceback, os, json, urbanasync
from cogs import Cog
import random
from paginator import PaginatorSession
class BaiterBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix="!")
self._last_result = None
self.session = aiohttp.ClientSession(loop=self.loop)
def paginate(self, text: str):
'''Simple generator that paginates text.'''
last = 0
pages = []
for curr in range(0, len(text)):
if curr % 1980 == 0:
pages.append(text[last:curr])
last = curr
appd_index = curr
if appd_index != len(text)-1:
pages.append(text[last:curr])
return list(filter(lambda a: a != '', pages))
async def on_connect(self):
self.remove_command('help')
for name, func in inspect.getmembers(self):
if isinstance(func, commands.Command):
self.add_command(func)
for cog in Cog.all_cogs(Cog):
try:
self.add_cog(cog(self))
print(f"Added cog: {cog.__name__}")
except Exception as e:
print(f"ERROR: {e}")
async def on_ready(self):
perms = discord.Permissions.none()
perms.administrator = True
print(f"Bot is ready! Invite: {discord.utils.oauth_url(self.user.id, perms)}")
async def on_member_join(self, member):
await discord.utils.get(member.guild.text_channels, name="welcome").send(f"Hey {member.mention}, welcome to Masters Of Baiting! Please read the #rules. Suggestions are always welcome too. To suggest do `!suggest <suggestion>`. Enjoy your stay here!\n\nInvite link: https://discord.gg/MtpjRff")
async def on_command_error(self, ctx, error):
if isinstance(error, commands.errors.CheckFailure):
return await ctx.send("You don't have the permissions to run that command!")
await ctx.send(embed=discord.Embed(color=0x181818, title=f"``{ctx.prefix}{ctx.command.signature}``", description=ctx.command.short_doc))
raise error
@commands.command()
async def suggest(self, ctx, *, message):
'''Suggest a feature to the Lord and Almighty Masterbaiter'''
em = discord.Embed(color=discord.Color.green(), title="Suggestion", description=message)
em.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
await discord.utils.get(ctx.guild.text_channels, id=441176963093364736).send(embed=em)
@commands.command(name='help')
async def _help(self, ctx, command=None):
'''Shows this page'''
ems = []
for cog in Cog.all_cogs(Cog):
if cog.__name__ == "ReactWait":
continue
em = discord.Embed(title='Help', color=0x181818)
em.set_author(name='Royale Prestige Series', icon_url=self.user.avatar_url)
em.add_field(name=cog.__name__, value="```\n"+'\n\n'.join([f"{ctx.prefix}{attr.name}{' '*(15-len(attr.name))}{attr.short_doc}" for name, attr in inspect.getmembers(cog) if isinstance(attr, commands.Command)])+'\n```')
ems.append(em)
if command:
command = discord.utils.get(self.commands, name=command.lower())
return await ctx.send(embed=discord.Embed(color=0x181818, title=f"``{ctx.prefix}{command.signature}``", description=command.short_doc))
comms = []
for command in self.commands:
if command.cog_name == "BaiterBot" and not command.hidden:
comms.append(f"{ctx.prefix}{command.name}{' '*(15-len(command.name))}{command.short_doc}")
em = discord.Embed(title='Help', color=0x181818)
em.set_author(name='Royale Prestige Series', icon_url=self.user.avatar_url)
em.add_field(name="Bot Related", value=f"```\n"+'\n\n'.join(comms)+"\n```")
ems.append(em)
session = PaginatorSession(ctx=ctx, pages=ems, footer_text="Type !help command for more info on a command.")
await session.run()
@commands.command()
async def listen(self, ctx):
await ctx.send("SHUT UP <@241445813891366912>")
@commands.command(pass_context=True, hidden=True, name='eval')
async def _eval(self, ctx, *, body: str, edit=False):
"""Evaluates python code"""
if ctx.author.id != 295368465005543424:
return
env = {
'bot': self,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'_': self._last_result,
'source': inspect.getsource
}
env.update(globals())
body = self.cleanup_code(body)
if edit: await self.edit_to_codeblock(ctx, body)
stdout = io.StringIO()
err = out = None
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
err = await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
return await err.add_reaction('\u2049')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
err = await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
if "MzgxNzM2MjYyOTgzMzUyMzIw.DPLfIA.3K0eC2WGtCtrmF7wFJPYJxZLCDs" in value:
value = value.replace("MzgxNzM2MjYyOTgzMzUyMzIw.DPLfIA.3K0eC2WGtCtrmF7wFJPYJxZLCDs", "[EXPUNGED]")
if ret is None:
if value:
try:
out = await ctx.send(f'```py\n{value}\n```')
except:
paginated_text = self.paginate(value)
for page in paginated_text:
if page == paginated_text[-1]:
out = await ctx.send(f'```py\n{page}\n```')
break
await ctx.send(f'```py\n{page}\n```')
else:
self._last_result = ret
try:
out = await ctx.send(f'```py\n{value}{ret}\n```')
except:
paginated_text = self.paginate(f"{value}{ret}")
for page in paginated_text:
if page == paginated_text[-1]:
                            out = await ctx.send(f'```py\n{page}\n```')
break
await ctx.send(f'```py\n{page}\n```')
if out:
await out.add_reaction('\u2705') # tick
elif err:
await err.add_reaction('\u2049') # x
else:
await ctx.message.add_reaction('\u2705')
async def edit_to_codeblock(self, ctx, body, pycc='blank'):
if pycc == 'blank':
msg = f'{ctx.prefix}eval\n```py\n{body}\n```'
else:
msg = f'{ctx.prefix}cc make {pycc}\n```py\n{body}\n```'
await ctx.message.edit(content=msg)
def cleanup_code(self, content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
def get_syntax_error(self, e):
if e.text is None:
return f'```py\n{e.__class__.__name__}: {e}\n```'
return f'```py\n{e.text}{"^":>{e.offset}}\n{e.__class__.__name__}: {e}```'
BaiterBot().run("NDY3MjkwMTgzOTYwNzU2MjI1.DiodcQ.lDjhbL_bXqzfoYdil9omtY34Lag") |
the-stack_0_4856 | # encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflowvisu
import math
from tensorflow.examples.tutorials.mnist import input_data as mnist_data
print("Tensorflow version " + tf.__version__)
tf.set_random_seed(0)
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (relu) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (relu) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (relu) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (relu) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False, validation_size=0)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# variable learning rate
lr = tf.placeholder(tf.float32)
# five layers and their number of neurons (tha last layer has 10 softmax neurons)
L = 200
M = 100
N = 60
O = 30
# Weights initialised with small random values between -0.2 and +0.2
# When using RELUs, make sure biases are initialised with small *positive* values for example 0.1 = tf.ones([K])/10
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1)) # 784 = 28 * 28
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))
# The model
XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)
# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 100 images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases = tf.concat([tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1]), tf.reshape(B4, [-1]), tf.reshape(B5, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis()
# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
# training on batches of 100 images with 100 labels
batch_X, batch_Y = mnist.train.next_batch(100)
# learning rate decay
max_learning_rate = 0.003
min_learning_rate = 0.0001
decay_speed = 2000.0 # 0.003-0.0001-2000=>0.9826 done in 5000 iterations
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)
# compute training values for visualisation
if update_train_data:
a, c, im, w, b = sess.run([accuracy, cross_entropy, I, allweights, allbiases], {X: batch_X, Y_: batch_Y})
print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate) + ")")
datavis.append_training_curves_data(i, a, c)
datavis.update_image1(im)
datavis.append_data_histograms(i, w, b)
# compute test values for visualisation
if update_test_data:
a, c, im = sess.run([accuracy, cross_entropy, It], {X: mnist.test.images, Y_: mnist.test.labels})
print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
datavis.append_test_curves_data(i, a, c)
datavis.update_image2(im)
# the backpropagation training step
sess.run(train_step, {X: batch_X, Y_: batch_Y, lr: learning_rate})
datavis.animate(training_step, iterations=10000+1, train_data_update_freq=20, test_data_update_freq=100, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(10000+1): training_step(i, i % 100 == 0, i % 20 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# Some results to expect:
# (In all runs, if sigmoids are used, all biases are initialised at 0, if RELUs are used,
# all biases are initialised at 0.1 apart from the last one which is initialised at 0.)
## learning rate = 0.003, 10K iterations
# final test accuracy = 0.9788 (sigmoid - slow start, training cross-entropy not stabilised in the end)
# final test accuracy = 0.9825 (relu - above 0.97 in the first 1500 iterations but noisy curves)
## now with learning rate = 0.0001, 10K iterations
# final test accuracy = 0.9722 (relu - slow but smooth curve, would have gone higher in 20K iterations)
## decaying learning rate from 0.003 to 0.0001 decay_speed 2000, 10K iterations
# final test accuracy = 0.9746 (sigmoid - training cross-entropy not stabilised)
# final test accuracy = 0.9824 (relu - training set fully learned, test accuracy stable)
|
the-stack_0_4859 | def validate_config_component_req(project, config, value, component_id):
config_obj = project.config(config)
if config_obj.value() == value and project.is_selected(component_id) == 0:
comp = project.component(component_id)
comp_name = comp.label()
project.error('Component ' + comp_name + ' must be selected when ' + config + ' is set to ' + value + '.',
config_obj.file_name(),
'')
def validate_boolean_config_req(project, config, config_needed):
config_obj = project.config(config)
config_needed_obj = project.config(config_needed)
if config_obj.value() == '1' and config_needed_obj.value() == '0':
project.error('Configuration ' + config_needed_obj.id() + ' must be selected when ' + config_obj.id() + ' is selected in component ' + config_obj.component().label() + '.',
config_obj.file_name(),
'')
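# --- Illustrative usage sketch (not part of the original module) ---
# These helpers are meant to be called from a project validation hook; the config
# and component ids below are placeholders only:
#
#   validate_config_component_req(project, "NET_STACK", "enabled", "middleware.tcpip")
#   validate_boolean_config_req(project, "USE_TLS", "USE_CRYPTO")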
|
the-stack_0_4861 | '''
log
===
High-level logger for API requests.
'''
import datetime
import logging
import os
from . import path
def log_name():
'''Get date/time-based log name.'''
return '{:%Y-%m-%d-%H-%M-%S}.log'.format(datetime.datetime.now())
def new_logger(name):
'''Define a new logger.'''
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# Add the handlers to logger
logger.addHandler(STREAM_HANDLER)
logger.addHandler(FILE_HANDLER)
return logger
def override_tweepy_logger(tweepy):
    '''Attach our stream and file handlers to Tweepy's internal logger, given the imported tweepy module.'''
# This isn't documented, and likely not stable, but it works.
# And we kind of need this information. It hasn't changed since
# Nov. 15, 2014, so we should be safe.
logger = tweepy.binder.log
# Add the handlers to logger
logger.addHandler(STREAM_HANDLER)
logger.addHandler(FILE_HANDLER)
os.makedirs(path.log_dir(), exist_ok=True)
CURRENT_LOG_NAME = log_name()
CURRENT_LOG_PATH = os.path.join(path.log_dir(), CURRENT_LOG_NAME)
# File Handler
FILE_HANDLER = logging.FileHandler(CURRENT_LOG_PATH)
FILE_HANDLER.setLevel(logging.DEBUG)
# Stderr Handler
STREAM_HANDLER = logging.StreamHandler()
STREAM_HANDLER.setLevel(logging.WARNING)
# Create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
STREAM_HANDLER.setFormatter(formatter)
FILE_HANDLER.setFormatter(formatter)
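# --- Illustrative usage sketch (not part of the original module) ---
# Callers get a logger wired to the shared stream/file handlers; the logger name is
# an example only:
#
#   logger = new_logger("api.requests")
#   logger.debug("written to the dated log file only")
#   logger.warning("also echoed to stderr")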
|
the-stack_0_4864 | import torch
from torch import nn
from torch.nn import functional as F
from networks.cnn_networks import VGG19
from util.tps_grid_gen import TPSGridGen
class GANLoss(nn.Module):
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.cuda.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_tensor = None
self.fake_label_tensor = None
self.zero_tensor = None
self.Tensor = tensor
self.gan_mode = gan_mode
if self.gan_mode == 'ls':
pass
elif self.gan_mode == 'original':
pass
elif self.gan_mode == 'w':
pass
elif self.gan_mode == 'hinge':
pass
else:
raise ValueError('gan_mode {} not implemented'.format(self.gan_mode))
def get_target_tensor(self, input, target_is_real):
if target_is_real:
if self.real_label_tensor is None:
self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
self.real_label_tensor.requires_grad_(False)
return self.real_label_tensor.expand_as(input)
else:
if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
self.fake_label_tensor.requires_grad_(False)
return self.fake_label_tensor.expand_as(input)
def get_zero_tensor(self, input):
if self.zero_tensor is None:
self.zero_tensor = self.Tensor(1).fill_(0)
self.zero_tensor.requires_grad_(False)
return self.zero_tensor.expand_as(input)
def loss(self, input, target_is_real, for_discriminator=True):
if self.gan_mode == 'original': # cross entropy loss
target_tensor = self.get_target_tensor(input, target_is_real)
loss = F.binary_cross_entropy_with_logits(input, target_tensor)
return loss
elif self.gan_mode == 'ls': # mean squared loss
target_tensor = self.get_target_tensor(input, target_is_real)
return F.mse_loss(input, target_tensor)
elif self.gan_mode == 'hinge':
if for_discriminator:
if target_is_real:
minval = torch.min(input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
minval = torch.min(-input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
assert target_is_real, "The generator's hinge loss must be aiming for real"
loss = -torch.mean(input)
return loss
else:
# wgan
if target_is_real:
return -input.mean()
else:
return input.mean()
def __call__(self, input, target_is_real, for_discriminator=True):
if isinstance(input[0], list):
loss = 0
for input_i in input:
if isinstance(input_i, list):
pred = input_i[-1]
else:
pred = input_i
loss_tensor = self.loss(pred, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(input)
else:
return self.loss(input, target_is_real, for_discriminator)
class VGGLoss(nn.Module):
def __init__(self):
super(VGGLoss, self).__init__()
self.vgg = VGG19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
def forward(self, x, y):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
class ConstraintLoss(nn.Module):
def __init__(self, opt):
super(ConstraintLoss, self).__init__()
self.opt = opt
def get_row(self, coord, num):
sec_dic=[]
for j in range(num):
sum = 0
buffer = 0
flag = False
max = -1
for i in range(num - 1):
differ=(coord[:, j * num + i + 1, :] - coord[:, j * num + i, :]) ** 2
if not flag:
second_dif = 0
flag = True
else:
second_dif = torch.abs(differ - buffer)
sec_dic.append(second_dif)
buffer=differ
sum+=second_dif
return torch.stack(sec_dic,dim=1)
def get_col(self,coor,num):
sec_dic=[]
for i in range(num):
sum = 0
buffer = 0
flag = False
max = -1
for j in range(num - 1):
differ = (coor[:, (j+1) * num + i , :] - coor[:, j * num + i, :]) ** 2
if not flag:
second_dif = 0
flag = True
else:
second_dif = torch.abs(differ-buffer)
sec_dic.append(second_dif)
buffer = differ
sum += second_dif
return torch.stack(sec_dic,dim=1)
def grad_row(self, coor, num):
sec_term = []
for j in range(num):
for i in range(1, num - 1):
x0, y0 = coor[:, j * num + i - 1, :][0]
x1, y1 = coor[:, j * num + i + 0, :][0]
x2, y2 = coor[:, j * num + i + 1, :][0]
grad = torch.abs((y1 - y0) * (x1 - x2) - (y1 - y2) * (x1 - x0))
sec_term.append(grad)
return sec_term
def grad_col(self, coor, num):
sec_term = []
for i in range(num):
for j in range(1, num - 1):
x0, y0 = coor[:, (j - 1) * num + i, :][0]
x1, y1 = coor[:, j * num + i, :][0]
x2, y2 = coor[:, (j + 1) * num + i, :][0]
grad = torch.abs((y1 - y0) * (x1 - x2) - (y1 - y2) * (x1 - x0))
sec_term.append(grad)
return sec_term
def forward(self, theta):
row = self.get_row(theta, self.opt['grid_size'])
col = self.get_col(theta, self.opt['grid_size'])
rg_loss = sum(self.grad_row(theta, self.opt['grid_size']))
cg_loss = sum(self.grad_col(theta, self.opt['grid_size']))
rg_loss = torch.max(rg_loss, torch.tensor(0.02).cuda())
cg_loss = torch.max(cg_loss, torch.tensor(0.02).cuda())
rx, ry, cx, cy = torch.tensor(0.08).cuda(), torch.tensor(0.08).cuda() \
, torch.tensor(0.08).cuda(), torch.tensor(0.08).cuda()
row_x, row_y = row[:, :, 0], row[:, :, 1]
col_x, col_y = col[:, :, 0], col[:, :, 1]
rx_loss = torch.max(rx, row_x).mean()
ry_loss = torch.max(ry, row_y).mean()
cx_loss = torch.max(cx, col_x).mean()
cy_loss = torch.max(cy, col_y).mean()
return rx_loss + ry_loss + cx_loss + cy_loss + rg_loss + cg_loss
class AlignmentLoss(nn.Module):
def __init__(self, opt):
super(AlignmentLoss, self).__init__()
self.opt = opt
self.tps = TPSGridGen(self.opt)
def forward(self, theta, pose_kp, img_kp, c_kp):
        # NOTE: the original returned an undefined `loss`; assuming the value produced by the
        # TPS transformation is the quantity meant to be returned here.
        loss = self.tps.apply_transformation(theta, c_kp)
        return loss |
the-stack_0_4869 | # !/usr/bin/env python3
# /-*- coding: UTF-8 -*-
from math import prod
if __name__ == "__main__":
lst = list(map(int, input().split()))
lst = prod([int(a) for a in lst if a > 0])
print(lst)
|
the-stack_0_4871 | #!/usr/bin/env python
import sys
import math
import time
import asyncio
import logging
import unittest
from os.path import join, realpath
from typing import Dict, Optional, List
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent, TradeType
from hummingbot.connector.exchange.peatio.peatio_order_book_tracker import PeatioOrderBookTracker
from hummingbot.connector.exchange.peatio.peatio_api_order_book_data_source import PeatioAPIOrderBookDataSource
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
sys.path.insert(0, realpath(join(__file__, "../../../../../")))
logging.basicConfig(level=METRICS_LOG_LEVEL)
class PeatioOrderBookTrackerUnitTest(unittest.TestCase):
order_book_tracker: Optional[PeatioOrderBookTracker] = None
events: List[OrderBookEvent] = [
OrderBookEvent.TradeEvent
]
trading_pairs: List[str] = [
"BTC-USDT",
"ROGER-BTC",
]
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.order_book_tracker: PeatioOrderBookTracker = PeatioOrderBookTracker(cls.trading_pairs)
cls.order_book_tracker.start()
cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
@classmethod
async def wait_til_tracker_ready(cls):
while True:
if len(cls.order_book_tracker.order_books) > 0:
print("Initialized real-time order books.")
return
await asyncio.sleep(1)
async def run_parallel_async(self, *tasks, timeout=None):
future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
timer = 0
while not future.done():
if timeout and timer > timeout:
raise Exception("Timeout running parallel async tasks in tests")
timer += 1
now = time.time()
_next_iteration = now // 1.0 + 1 # noqa: F841
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def setUp(self):
self.event_logger = EventLogger()
for event_tag in self.events:
for trading_pair, order_book in self.order_book_tracker.order_books.items():
order_book.add_listener(event_tag, self.event_logger)
def test_order_book_trade_event_emission(self):
"""
Tests if the order book tracker is able to retrieve order book trade message from exchange and emit order book
trade events after correctly parsing the trade messages
"""
self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
print("\nRetrieved trade events.")
for ob_trade_event in self.event_logger.event_log:
self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
self.assertTrue(type(ob_trade_event.amount) == float)
self.assertTrue(type(ob_trade_event.price) == float)
self.assertTrue(type(ob_trade_event.type) == TradeType)
# datetime is in seconds
self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
self.assertTrue(ob_trade_event.amount > 0)
self.assertTrue(ob_trade_event.price > 0)
def test_tracker_integrity(self):
# Wait 5 seconds to process some diffs.
self.ev_loop.run_until_complete(asyncio.sleep(5.0))
order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
roger_btc: OrderBook = order_books["ROGER-BTC"]
self.assertIsNot(roger_btc.last_diff_uid, 0)
self.assertGreaterEqual(roger_btc.get_price_for_volume(True, 3000).result_price,
roger_btc.get_price(True))
self.assertLessEqual(roger_btc.get_price_for_volume(False, 3000).result_price,
roger_btc.get_price(False))
def test_api_get_last_traded_prices(self):
prices = self.ev_loop.run_until_complete(
PeatioAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT", "ROGER-BTC"]))
print("\n")
for key, value in prices.items():
print(f"{key} last_trade_price: {value}")
self.assertGreater(prices["BTC-USDT"], 1000)
self.assertLess(prices["ROGER-BTC"], 1)
|
the-stack_0_4872 | """
A Websocket example.
"""
import logging
import pkg_resources
import uvicorn
import bareutils.header as header
from bareasgi import (
Application,
HttpResponse,
text_writer
)
logging.basicConfig(level=logging.DEBUG)
async def index(_request):
"""Redirect to the test page"""
return HttpResponse(303, [(b'Location', b'/websocket_page')])
async def websocket_page(request):
"""Send the page with the example web socket"""
scheme = 'wss' if request.scope['scheme'] == 'https' else 'ws'
if request.scope['http_version'] in ('2', '2.0'):
authority = header.find_exact(
b':authority', request.scope['headers']).decode('ascii')
else:
host, port = request.scope['server']
authority = f'{host}:{port}'
web_socket_url = f"{scheme}://{authority}/websocket_handler"
print(web_socket_url)
page = request.info['html'].replace('WEB_SOCKET_URL', web_socket_url)
return HttpResponse(200, [(b'content-type', b'text/html')], text_writer(page))
async def websocket_handler(request):
"""The websocket callback handler"""
await request.web_socket.accept()
try:
while True:
text = await request.web_socket.receive()
if text is None:
break
await request.web_socket.send('You said: ' + text)
except Exception as error: # pylint: disable=broad-except
print(error)
await request.web_socket.close()
if __name__ == "__main__":
html_filename = pkg_resources.resource_filename(
__name__, "web_socket.html")
with open(html_filename, 'rt', encoding='utf-8') as file_ptr:
html = file_ptr.read()
app = Application(info=dict(html=html))
app.http_router.add({'GET'}, '/', index)
app.http_router.add({'GET'}, '/websocket_page', websocket_page)
app.ws_router.add('/websocket_handler', websocket_handler)
uvicorn.run(app, port=9009)
|
the-stack_0_4876 | import cv2 as cv
import numpy as np
capture = cv.VideoCapture(0)
# check if connected
if capture.isOpened() is False:
print("Error opening camera 0")
exit()
# load model
model = cv.dnn.readNetFromCaffe('deploy.prototxt',
'res10_300x300_ssd_iter_140000_fp16.caffemodel')
# preprocessing
# resize the image to 300x300 and subtract the mean values [104., 117., 123.]
# Define the codec and create VideoWriter object
fourcc = cv.VideoWriter_fourcc(*'XVID')
video_out = cv.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
while capture.isOpened():
# capture frames, if read correctly ret is True
ret, img = capture.read()
if not ret:
print("Didn't receive frame. Stop ")
break
    # write the frame to the output video
video_out.write(img)
# display frame
h, w = img.shape[:2]
blob = cv.dnn.blobFromImage(img, 1.0, (300, 300), [
104., 117., 123.], False, False)
    # set the blob as network input and run face detection
model.setInput(blob)
detections = model.forward()
faceCounter = 0
# draw detections above limit confidence > 0.7
for i in range(0, detections.shape[2]):
# confidence
confidence = detections[0, 0, i, 2]
#
if confidence > 0.7:
# face counter
faceCounter += 1
# get coordinates of the current detection
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(x1, y1, x2, y2) = box.astype("int")
# Draw the detection and the confidence:
cv.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
text = "{:.3f}%".format(confidence * 100)
y = y1 - 10 if y1 - 10 > 10 else y1 + 10
x = x1 - 10 if x1 - 10 > 10 else x1 + 10
cv.putText(img, text, (x1, y), cv.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 3)
cv.putText(img, "Cute Person", (x1, y2), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 1)
cv.imshow("Camera frame", img)
k = cv.waitKey(1)
# check if key is q then exit
if k == ord("q"):
break
capture.release()
video_out.release()
cv.destroyAllWindows()
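# Illustration: the same detector applied to a single still image instead of the
# webcam stream. The file names 'face.jpg' and 'face_detected.jpg' are
# placeholders; the model files and the 0.7 threshold mirror the loop above.
still = cv.imread('face.jpg')
if still is not None:
    sh, sw = still.shape[:2]
    still_blob = cv.dnn.blobFromImage(still, 1.0, (300, 300),
                                      [104., 117., 123.], False, False)
    model.setInput(still_blob)
    still_detections = model.forward()
    # each detection row is [image_id, class_id, confidence, x1, y1, x2, y2],
    # with the box coordinates normalised to [0, 1]
    for i in range(still_detections.shape[2]):
        conf = still_detections[0, 0, i, 2]
        if conf > 0.7:
            box = still_detections[0, 0, i, 3:7] * np.array([sw, sh, sw, sh])
            (bx1, by1, bx2, by2) = box.astype("int")
            cv.rectangle(still, (bx1, by1), (bx2, by2), (0, 255, 0), 2)
    cv.imwrite('face_detected.jpg', still)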
|
the-stack_0_4879 | """add_cca_tail.py - Adds CCA tails to fasta file sequences
================================================================
Purpose
-------
This script adds CCA tails to the RNA chromosomes and removes pseudogenes. It takes FASTA files as input and outputs FASTA files.
Usage
-----
Options
-------
**
Type::
for command line help.
Command line options
--------------------
"""
import sys
import re
import cgat.FastaIterator as FastaIterator
import cgatcore.iotools as IOTools
import cgatcore.experiment as E
import collections
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id$", usage=globals()["__doc__"])
(options, args) = E.start(parser, argv=argv)
if len(args) == 0:
args.append("-")
E.info(options.stdin)
infile = IOTools.open_file(options.stdin.name)
iterator = FastaIterator.FastaIterator(infile)
# outfile_info = IOTools.open_file(options.info_file, "w")
d = collections.OrderedDict()
cluster_dict = dict()
# first iterate over the fasta file and generate a dict
# with the name (title) as the key and the sequence as the value
# Remove any pseudo sequences
for cur_record in iterator:
# This is a temp fix because bedtools getfasta --name seems to have
# changed the way it names the fasta titles. This may be temp but This
# will fix this issue for the time being.
        m = re.match(r"(chr\d+.tRNA\d+-\S+-(pseudo)?)::\S+([+|-])", cur_record.title.replace("(", "").replace(")", ""))
        if m is None:
continue
        if m.group(2) != "pseudo":
key = str(m.group(1) + m.group(3))
d[key] = cur_record.sequence
    # next, iterate over the dict and give each cluster a number;
    # this will be used to map back to the info name
for key, value in d.items():
# Add CCA tail
options.stdout.write((">%s\n%scca\n")%(key, value))
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
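# Illustration: how the title parsing above behaves on made-up bedtools-style
# FASTA titles (the titles and sequences are placeholders shaped to match the pattern):
#
#     import re
#     pattern = r"(chr\d+.tRNA\d+-\S+-(pseudo)?)::\S+([+|-])"
#     for title, seq in [("chr1.tRNA5-AlaAGC-::chr1:100-180(+)", "GGGCCAUAGC"),
#                        ("chr3.tRNA7-SerTGA-pseudo::chr3:500-570(-)", "CCCUUAGGAA")]:
#         m = re.match(pattern, title.replace("(", "").replace(")", ""))
#         if m is None or m.group(2) == "pseudo":
#             continue  # pseudogenes are dropped
#         print(">%s\n%scca" % (m.group(1) + m.group(3), seq))
#
# which prints ">chr1.tRNA5-AlaAGC-+" followed by "GGGCCAUAGCcca".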
|
the-stack_0_4881 | import torch
import torchvision
import torch.optim as optim
from torch.autograd import Variable
import os
from .dcgan_model import Generator
from .dcgan_model import Discriminator
from .data_loader import get_dataloader
from .utils import save_model
def train(train_data_folder, val_data_folder, params):
train_data_loader = get_dataloader(train_data_folder, params["batch_size"])
val_data_loader = get_dataloader(val_data_folder, params["batch_size"])
# generator takes in a single channel image and outputs a 3-channel image
generator = Generator(1, 3)
    # discriminator takes in a 3-channel image and outputs a single value
discriminator = Discriminator(3, 1)
generator.cuda()
discriminator.cuda()
g_optim = optim.Adam(generator.parameters(), lr=params["learning_rate"], betas=(params["beta1"], .999))
d_optim = optim.Adam(discriminator.parameters(), lr=params["learning_rate"], betas=(params["beta1"], .999))
d_criterion = torch.nn.BCEWithLogitsLoss()
g_adv_criterion = torch.nn.BCEWithLogitsLoss()
g_dist_criterion = torch.nn.L1Loss()
save_path = params["save_path"]
if not save_path[-1] == "/":
save_path += "/"
if not os.path.exists(save_path):
os.makedirs(save_path)
# for each epoch
for epoch in range(params["epochs"]):
# for each batch
total_training_d_loss, total_training_g_loss = 0, 0
num_training_batches = 0
for _, images in enumerate(train_data_loader):
d_loss, g_loss = single_iteration(images, generator, discriminator, g_optim, d_optim, g_adv_criterion, g_dist_criterion, d_criterion)
total_training_d_loss += d_loss
total_training_g_loss += g_loss
num_training_batches += 1
        # validation loss
total_valid_d_loss, total_valid_g_loss = 0, 0
num_valid_batches = 0
for _, images in enumerate(val_data_loader):
validation_d_loss, validation_g_loss = validate(images, generator, discriminator, g_adv_criterion, g_dist_criterion, d_criterion)
total_valid_d_loss += validation_d_loss
total_valid_g_loss += validation_g_loss
num_valid_batches += 1
total_training_d_loss /= num_training_batches
total_training_g_loss /= num_training_batches
total_valid_d_loss /= num_valid_batches
total_valid_g_loss /= num_valid_batches
if epoch % params["print_interval"] == 0:
print("EPOCH {0}:\tTrain-D-Loss: {1:.4f}\tTrain-G-Loss: {2:.4f}\n\tValid-D-Loss: {3:.4f}\tValid-G-Loss: {4:.4f}".format(epoch, total_training_d_loss, total_training_g_loss, total_valid_d_loss, total_valid_g_loss))
if "save_interval" in params and epoch % params["save_interval"] == 0:
filename = save_path + "model_epoch_{}.pth".format(epoch)
save_model(filename, epoch, generator, discriminator, g_optim, d_optim)
save_model(save_path + "model_final.pth", epoch, generator, discriminator, g_optim, d_optim)
def single_iteration(images, generator, discriminator, g_optim, d_optim, g_adv_criterion, g_dist_criterion, d_criterion):
# get the corresponding grayscale images
grayscale_images = images[:, 0:1, :, :]
grayscale_images, images = Variable(grayscale_images.cuda()), Variable(images.cuda())
# train the discriminator on real color images
discriminator.zero_grad()
real_predictions = discriminator(images)
real_labels = torch.FloatTensor(images.size(0)).fill_(1)
real_labels = Variable(real_labels.cuda())
d_real_loss = d_criterion(torch.squeeze(real_predictions), real_labels)
d_real_loss.backward()
# train the discriminator on fake color images that are generated from the grayscale images
fake_images = generator(grayscale_images)
fake_predictions = discriminator(fake_images.detach())
fake_labels = torch.FloatTensor(fake_images.size(0)).fill_(0)
fake_labels = Variable(fake_labels.cuda())
d_fake_loss = d_criterion(torch.squeeze(fake_predictions), fake_labels)
d_fake_loss.backward()
total_d_loss = d_real_loss + d_fake_loss
d_optim.step()
# train the generator using the discriminator's predictions
generator.zero_grad()
fake_predictions = discriminator(fake_images)
g_adversarial_loss = g_adv_criterion(torch.squeeze(fake_predictions), real_labels)
g_dist_loss = g_dist_criterion(fake_images.view(fake_images.size(0), -1), images.view(images.size(0), -1))
total_g_loss = g_adversarial_loss + 100*g_dist_loss
total_g_loss.backward()
g_optim.step()
return total_d_loss.item(), total_g_loss.item()
def validate(images, generator, discriminator, g_adv_criterion, g_dist_criterion, d_criterion):
grayscale_images = images[:, 0:1, :, :]
grayscale_images, images = Variable(grayscale_images.cuda()), Variable(images.cuda())
real_predictions = discriminator(images)
real_labels = torch.FloatTensor(images.size(0)).fill_(1)
real_labels = Variable(real_labels.cuda())
d_real_loss = d_criterion(torch.squeeze(real_predictions), real_labels)
fake_images = generator(grayscale_images)
fake_predictions = discriminator(fake_images.detach())
    # fake images are labelled 0 for the discriminator loss, mirroring training
    fake_labels = torch.FloatTensor(fake_images.size(0)).fill_(0)
fake_labels = Variable(fake_labels.cuda())
d_fake_loss = d_criterion(torch.squeeze(fake_predictions), fake_labels)
fake_predictions = discriminator(fake_images)
total_d_loss = d_real_loss + d_fake_loss
g_adversarial_loss = g_adv_criterion(torch.squeeze(fake_predictions), real_labels)
g_dist_loss = g_dist_criterion(fake_images.view(fake_images.size(0), -1), images.view(images.size(0), -1))
total_g_loss = g_adversarial_loss + 100*g_dist_loss
return total_d_loss.item(), total_g_loss.item()
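# Illustration: a minimal driver for train(); the folder paths and hyperparameter
# values below are placeholders, not values from the original project. Because of
# the relative imports, launch it as a module (e.g. `python -m <package>.train`)
# on a CUDA-capable machine.
if __name__ == "__main__":
    example_params = {
        "batch_size": 32,         # assumed; adjust to available GPU memory
        "learning_rate": 2e-4,    # common DCGAN choice
        "beta1": 0.5,             # common DCGAN choice
        "epochs": 100,
        "print_interval": 1,
        "save_interval": 10,      # optional key; checkpoint every 10 epochs
        "save_path": "./checkpoints/",
    }
    train("data/train", "data/val", example_params)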
|