Dataset columns: ext (stringclasses, 9 values), sha (stringlengths, 40 to 40), content (stringlengths, 3 to 1.04M)
py
1a49a373661ed76c484f9ef94526d786934b35ca
i = 0
j = 0  # j must be initialized before it is accumulated in the loop
while i < 10:
    j += i
    i += 1
py
1a49a3ad08eed409237bd0c3c0fe220e71e17abf
from cirq_qaoa.cirq_max_cut_solver import define_grid_qubits, solve_maxcut


def main():
    size = 2
    steps = 2
    qubits = define_grid_qubits(size=size)
    qubit_pairs = [(qubits[0], qubits[1]),
                   (qubits[0], qubits[2]),
                   (qubits[1], qubits[2])]
    solve_maxcut(qubit_pairs=qubit_pairs, steps=steps)


if __name__ == '__main__':
    main()
py
1a49a3be6da862b4eb681f7d9f0d3ca7a1d44ee7
# -*- coding: utf-8 -*-
# pidlockfile/__init__.py
# PID lock file implementation for use with
# ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2018 Alexei Igonine <[email protected]>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.

from __future__ import (absolute_import, unicode_literals)

from .pidlockfile import (PIDLockFile, AlreadyLocked, LockTimeout)
py
1a49a630a83cdca8f0188ba0b9bc5421c21cbe7e
"""Functions for Github authorisation.""" try: input = raw_input except NameError: pass import getpass import json import requests # Keyring stores passwords by a 'username', but we're not storing a username and # password fake_username = "ipython_tools" token = None def get_auth_token(): global token if token is not None: return token import keyring token = keyring.get_password("github", fake_username) if token is not None: return token print( "Please enter your github username and password. These are not " "stored, only used to get an oAuth token. You can revoke this at " "any time on Github." ) user = input("Username: ") pw = getpass.getpass("Password: ") auth_request = { "scopes": ["public_repo", "gist"], "note": "IPython tools", "note_url": "https://github.com/ipython/ipython/tree/master/tools", } response = requests.post( "https://api.github.com/authorizations", auth=(user, pw), data=json.dumps(auth_request), ) response.raise_for_status() token = json.loads(response.text)["token"] keyring.set_password("github", fake_username, token) return token def make_auth_header(): return {"Authorization": "token " + get_auth_token()} def post_issue_comment(project, num, body): url = "https://api.github.com/repos/{project}/issues/{num}/comments".format( project=project, num=num ) payload = json.dumps({"body": body}) requests.post(url, data=payload, headers=make_auth_header()) def post_gist(content, description="", filename="file", auth=False): """Post some text to a Gist, and return the URL.""" post_data = json.dumps( { "description": description, "public": True, "files": {filename: {"content": content}}, } ).encode("utf-8") headers = make_auth_header() if auth else {} response = requests.post( "https://api.github.com/gists", data=post_data, headers=headers ) response.raise_for_status() response_data = json.loads(response.text) return response_data["html_url"] def get_pull_request(project, num, github_api=3): """get pull request info by number github_api : version of github api to use """ if github_api == 2: url = "http://github.com/api/v2/json/pulls/{project}/{num}".format( project=project, num=num ) elif github_api == 3: url = "https://api.github.com/repos/{project}/pulls/{num}".format( project=project, num=num ) response = requests.get(url) response.raise_for_status() if github_api == 2: return json.loads(response.text)["pull"] return json.loads(response.text) def get_pulls_list(project, github_api=3): """get pull request list github_api : version of github api to use """ if github_api == 3: url = "https://api.github.com/repos/{project}/pulls".format(project=project) else: url = "http://github.com/api/v2/json/pulls/{project}".format(project=project) response = requests.get(url) response.raise_for_status() if github_api == 2: return json.loads(response.text)["pulls"] return json.loads(response.text)
py
1a49a6a9285eea8d1f62660ba2c70e17caba7de4
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # Mark Koennecke <[email protected]> # Jakob Lass <[email protected]> # # ***************************************************************************** """ This is part of the TAS library which implemnts Mark Lumsden's UB matrix algorithm for triple axis. See J. Appl. Cryst. (2005). 38, 405-411 https://doi.org/10.1107/S0021889805004875 for reference. The original implementation was in ANSII-C by Mark Koennecke at PSI. This implementation has been ported from C to python by Jakob Lass, then also at PSI """ from copy import deepcopy import numpy as np from nicos_sinq.sxtal.singlexlib import matFromTwoVectors from nicos_sinq.sxtal.trigd import Acosd, Atand2, Cosd, Rtand, Sind, \ angleBetween def tasAngleBetween(v1, v2): return np.rad2deg(angleBetween(v1, v2)) def fmod(x, y): s = np.sign(x) res = s*np.mod(np.abs(x), y) return res class tasQEPosition(): def __init__(self, ki, kf, qh, qk, ql, qm): self.ki = ki self.kf = kf self.qh = qh self.qk = qk self.ql = ql self.qm = qm class tasAngles(): def __init__(self, monochromator_two_theta, a3, sample_two_theta, sgl, sgu, analyzer_two_theta): self.monochromator_two_theta = monochromator_two_theta self.a3 = a3 self.sample_two_theta = sample_two_theta self.sgu = sgu self.sgl = sgl self.analyzer_two_theta = analyzer_two_theta class tasReflection(): def __init__(self, qe=None, angles=None, ki=None, kf=None, qh=None, qk=None, ql=None, qm=None, monochromator_two_theta=None, a3=None, sample_two_theta=None, sgl=None, sgu=None, analyzer_two_theta=None): if isinstance(qe, tasReflection): self.qe = deepcopy(qe.qe) self.angles = deepcopy(qe.angles) else: if qe is None: self.qe = tasQEPosition(ki, kf, qh, qk, ql, qm) else: self.qe = qe if angles is None: self.angles = tasAngles(monochromator_two_theta, a3, sample_two_theta, sgl, sgu, analyzer_two_theta) else: self.angles = angles def __getattr__(self, key): # if key in ['qe','angles']: # Is automatically tested # return self.__dict__[key] if key in self.qe.__dict__.keys(): return getattr(self.qe, key) elif key in self.angles.__dict__.keys(): return getattr(self.angles, key) else: raise AttributeError( "'tasReflection' object hs no attribute '{}'".format(key)) ECONST = 2.072 # 2.072122396 def energyToK(energy): """Convert energy in meV to K in q/A""" return np.sqrt(energy / ECONST) def KToEnergy(K): """Convert K in 1/A to E in meV""" return ECONST*np.power(K, 2.0) def tasReflectionToHC(r, B): """Calculate HC from HKL and B matrix""" return tasHKLToHC(r.qh, r.qk, r.ql, B) def tasHKLToHC(qh, qk, ql, B): """Calculate HC from reflection r and B matrix""" h = np.array([qh, qk, ql]) hc = 
np.dot(B, h) return hc def calcTheta(ki, kf, two_theta): """ |ki| - |kf|cos(two_theta) tan(theta) = -------------------------- |kf|sin(two_theta) """ return Rtand(np.abs(ki) - np.abs(kf) * Cosd(two_theta), np.abs(kf) * Sind(two_theta)) def tasAngleBetweenReflections(B, r1, r2): """Calculate angle between two reflections""" return tasAngleBetweenReflectionsHKL(B, r1.qh, r1.qk, r1.ql, r2.qh, r2.qk, r2.ql) def tasAngleBetweenReflectionsHKL(B, h1, k1, l1, h2, k2, l2): """Calculate angle between two reflections""" v1 = np.array([h1, k1, l1]) v2 = np.array([h2, k2, l2]) chi1 = np.einsum('ij,j...->i...', B, v1) chi2 = np.einsum('ij,j...->i...', B, v2) angle = tasAngleBetween(chi1, chi2) return angle def uFromAngles(om, sgu, sgl): u = np.array([Cosd(om)*Cosd(sgl), -Sind(om)*Cosd(sgu)+Cosd(om)*Sind(sgl)*Sind(sgu), Sind(om)*Sind(sgu)+Cosd(om)*Sind(sgl)*Cosd(sgu)]) return u def calcTasUVectorFromAngles(rr): ss = np.sign(rr.sample_two_theta) r = tasReflection(rr) r.sample_two_theta = np.abs(r.sample_two_theta) theta = calcTheta(r.ki, r.kf, r.sample_two_theta) om = r.angles.a3 - ss*theta m = uFromAngles(om, r.angles.sgu, ss*r.angles.sgl) return m def tasReflectionToQC(r, UB): return tasReflectionToQCHKL(r.qh, r.qk, r.ql, UB) def tasReflectionToQCHKL(h, k, ll, UB): Q = np.array([h, k, ll]) return np.einsum('ij,j...->i...', UB, Q) def makeAuxReflection(B, r1, ss, hkl): r2 = tasReflection(r1) r2.qe.qh, r2.qe.qk, r2.qe.ql = hkl theta = calcTheta(r1.qe.ki, r1.qe.kf, ss*r1.angles.sample_two_theta) om = r1.angles.a3 - ss*theta om += tasAngleBetweenReflectionsHKL(B, r1.qh, r1.qk, r1.ql, *hkl) QC = tasReflectionToHC(r2.qe, B) q = np.linalg.norm(QC) cos2t = np.divide(r1.ki * r1.ki + r1.kf * r1.kf - q * q, (2. * np.abs(r1.ki) * np.abs(r1.kf))) if np.abs(cos2t) > 1.: raise RuntimeError('Scattering angle not closed!') r2.angles.sample_two_theta = ss * Acosd(cos2t) theta = calcTheta(r1.qe.ki, r1.qe.kf, ss*r2.angles.sample_two_theta) r2.angles.a3 = om + ss*theta r2.angles.a3 = fmod(r2.angles.a3 + ss*180., 360.) - ss*180. return r2 def calcTwoTheta(B, ref, ss): QC = tasReflectionToHC(ref, B) q = np.linalg.norm(QC) cos2t = np.divide(ref.ki * ref.ki + ref.kf * ref.kf - q * q, (2. * np.abs(ref.ki) * np.abs(ref.kf))) if np.abs(cos2t) > 1.: raise RuntimeError( 'Calculated abs(cos2t) value {} bigger than 1!' ' Scattering angle not closed'.format(np.abs(cos2t))) value = ss * Acosd(cos2t) return value def calcPlaneNormal(r1, r2): u1 = calcTasUVectorFromAngles(r1) u2 = calcTasUVectorFromAngles(r2) planeNormal = np.cross(u1, u2) planeNormal *= 1.0/np.linalg.norm(planeNormal) # In TasCode code is commented out performing check # for sign of planeNormal[2] is performed. # If negative, z component negated. 
planeNormal[2] = np.abs(planeNormal[2]) return planeNormal def calcTasUBFromTwoReflections(cell, r1, r2): B = cell.calculateBMatrix() h1 = tasReflectionToHC(r1.qe, B) h2 = tasReflectionToHC(r2.qe, B) HT = matFromTwoVectors(h1, h2) # calculate U vectors and UT matrix u1 = calcTasUVectorFromAngles(r1) u2 = calcTasUVectorFromAngles(r2) UT = matFromTwoVectors(u1, u2) # UT = U * HT U = np.dot(UT, HT.T) UB = np.dot(U, B) return UB def buildRMatrix(UB, planeNormal, qe): U1V = tasReflectionToQC(qe, UB) U1V *= 1.0/np.linalg.norm(U1V) U2V = np.cross(planeNormal, U1V) if np.linalg.norm(U2V) < .0001: raise RuntimeError('Found vector is too short') TV = buildTVMatrix(U1V, U2V) TVINV = np.linalg.inv(TV) return TVINV def buildTVMatrix(U1V, U2V): U2V *= 1.0/np.linalg.norm(U2V) T3V = np.cross(U1V, U2V) T3V *= 1.0/np.linalg.norm(T3V) T = np.zeros((3, 3)) for i in range(3): T[i][0] = U1V[i] T[i][1] = U2V[i] T[i][2] = T3V[i] return T def calcTasQAngles(UB, planeNormal, ss, a3offset, qe): R = buildRMatrix(UB, planeNormal, qe) angles = tasAngles(0, 0, 0, 0, 0, 0) cossgl = np.sqrt(R[0][0]*R[0][0]+R[1][0]*R[1][0]) angles.sgl = ss*Atand2(-R[2][0], cossgl) if np.abs(angles.sgl - 90.) < .5: raise RuntimeError('Combination of UB and Q is not valid') # Now, this is slightly different then in the publication by M. Lumsden. # The reason is that the atan2 helps to determine the sign of om # whereas the sin, cos formula given by M. Lumsden yield ambiguous signs # especially for om. # sgu = atan(R[2][1],R[2][2]) where: # R[2][1] = cos(sgl)sin(sgu) # R[2][2] = cos(sgu)cos(sgl) # om = atan(R[1][0],R[0][0]) where: # R[1][0] = sin(om)cos(sgl) # R[0][0] = cos(om)cos(sgl) # The definitions of the R components are taken from M. Lumsden # R-matrix definition. om = Atand2(R[1][0]/cossgl, R[0][0]/cossgl) angles.sgu = Atand2(R[2][1]/cossgl, R[2][2]/cossgl) QC = tasReflectionToQC(qe, UB) # q = 2.*np.pi*np.linalg.norm(QC) q = np.linalg.norm(QC) cos2t = (qe.ki * qe.ki + qe.kf * qe.kf - q * q) / (2. * np.abs(qe.ki) * np.abs(qe.kf)) if np.abs(cos2t) > 1.: raise RuntimeError('Scattering angle cannot ' 'be closed, cos2t = ', cos2t) theta = calcTheta(qe.ki, qe.kf, Acosd(cos2t)) angles.sample_two_theta = ss * Acosd(cos2t) angles.a3 = om + ss*theta + a3offset # # put a3 into -180, 180 properly. We can always turn by 180 # because the scattering geometry is symmetric in this respect. # It is like looking at the scattering plane from the other side angles.a3 = fmod(angles.a3 + ss*180., 360.) - ss*180. return angles def calcScatteringPlaneNormal(qe1, qe2): v1 = [qe1.qh, qe1.qk, qe1.ql] v2 = [qe2.qh, qe2.qk, qe2.ql] planeNormal = np.cross(v1, v2) planeNormal *= 1.0/np.linalg.norm(planeNormal) return planeNormal def calcTasQH(ub, angles, ki, kf): ubinv = np.linalg.inv(ub) om = angles.a3 sample_two_theta = angles.sample_two_theta sgu = angles.sgu sgl = angles.sgl ss = np.sign(sample_two_theta) theta = calcTheta(ki, kf, abs(sample_two_theta)) om = om - ss*theta qv = uFromAngles(om, sgu, ss*sgl) # normalize the QV vector to be the length of the Q vector # Thereby take into account the physicists magic fudge # 2PI factor q = np.sqrt(ki**2 + kf**2 - 2. * ki * kf * Cosd(sample_two_theta)) # The line below depends on the 2PI conventions. q /= np.pi*2. qv *= q return ubinv.dot(qv)
py
1a49a858e1499d62eadbd5289bbf493598cbb9ab
__title__ = 'mrscrub'
__description__ = 'MR data deidentifier'
__url__ = 'https://github.com/harvard-nrg/mrscrub'
__version__ = '0.2.0'
__author__ = 'Neuroinformatics Research Group'
__author_email__ = '[email protected]'
py
1a49a8aa48281506a9e9e4dd2027c3dd84bc7913
from telethon.tl.functions.account import UpdateProfileRequest from telethon.tl.functions.photos import DeletePhotosRequest, UploadProfilePhotoRequest from telethon.tl.functions.users import GetFullUserRequest from telethon.tl.types import InputPhoto from userbot import CMD_HELP, LOGS, STORAGE, bot from userbot.events import register if not hasattr(STORAGE, "userObj"): STORAGE.userObj = False @register(outgoing=True, pattern=r"^\.impostor ?(.*)") async def impostor(event): inputArgs = event.pattern_match.group(1) if "restore" in inputArgs: await event.edit("**Voltando à minha verdadeira identidade...**") if not STORAGE.userObj: return await event.edit( "**Você precisa clonar um perfil antes de reverter!**" ) await updateProfile(STORAGE.userObj, restore=True) return await event.edit("**Revertido com sucesso!**") if inputArgs: try: user = await event.client.get_entity(inputArgs) except: return await event.edit("**Nome de usuário/ID inválido.**") userObj = await event.client(GetFullUserRequest(user)) elif event.reply_to_msg_id: replyMessage = await event.get_reply_message() if replyMessage.sender_id is None: return await event.edit("**Não é possível se passar por administradores anônimos, sed.**") userObj = await event.client(GetFullUserRequest(replyMessage.sender_id)) else: return await event.edit( "**Use** `.help impersonate` **para aprender como usá-lo.**" ) if not STORAGE.userObj: STORAGE.userObj = await event.client(GetFullUserRequest(event.sender_id)) LOGS.info(STORAGE.userObj) await event.edit("**Roubando a identidade dessa pessoa aleatória...**") await updateProfile(userObj) await event.edit("**Eu sou você e você é eu, somos um só.**") async def updateProfile(userObj, restore=False): firstName = ( "Deleted Account" if userObj.user.first_name is None else userObj.user.first_name ) lastName = "" if userObj.user.last_name is None else userObj.user.last_name userAbout = userObj.about if userObj.about is not None else "" userAbout = "" if len(userAbout) > 70 else userAbout if restore: userPfps = await bot.get_profile_photos("me") userPfp = userPfps[0] await bot( DeletePhotosRequest( id=[ InputPhoto( id=userPfp.id, access_hash=userPfp.access_hash, file_reference=userPfp.file_reference, ) ] ) ) else: try: userPfp = userObj.profile_photo pfpImage = await bot.download_media(userPfp) await bot(UploadProfilePhotoRequest(await bot.upload_file(pfpImage))) except BaseException: pass await bot( UpdateProfileRequest(about=userAbout, first_name=firstName, last_name=lastName) ) CMD_HELP.update( { "impostor": ">`.impostor` (como uma resposta a uma mensagem de um usuário)\ \n**Uso:** Rouba a identidade do usuário.\ \n\n>`.impostor <username/ID>`\ \n**Uso:** Rouba do nome de usuário/ID fornecido.\ \n\n>`.impostor restore`\ \n**Uso:** Reverta para sua verdadeira identidade.\ \n\n**Sempre restaure antes de executá-lo novamente.**\ " } )
py
1a49a97a13fac6fa7f162837bc66c910632aab48
from time import time, sleep

import VideoServers


class Camera(object):
    """An emulated camera implementation that streams a repeated sequence of
    files 1.jpg, 2.jpg and 3.jpg at a rate of one frame per second."""

    def __init__(self):
        VideoServers.ClientNum += 1
        self.frames = [open(f + '.jpg', 'rb').read() for f in ['1', '2', '3']]
        self.last = open("default.jpg", "rb").read()

    def get_frame(self):
        sleep(.01)
        if VideoServers.ImagesQueue.empty() == False:
            self.last = VideoServers.ImagesQueue.get()
            return self.last
        return self.last

    def __del__(self):
        VideoServers.ClientNum -= 1
py
1a49ab30ff70de119f534682c72a2fb983affa8d
from .base_model import BaseModel
from .inverse_online import AdaptiveLinearModel
from .bc import BehaviouralCloning, BehaviouralCloningDeep, BehaviouralCloningLSTM
from .cate_nets import CIRL
from .rcal import RCAL

__all__ = [
    "BaseModel",
    "AdaptiveLinearModel",
    "BehaviouralCloning",
    "BehaviouralCloningDeep",
    "BehaviouralCloningLSTM",
    "CIRL",
    "RCAL",
]
py
1a49ac5ff5a99a713656748d22b5625d306a6dce
""" Some exception classes for the ondevice client """ class _Exception(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args) self.msg = args[0] for k,v in kwargs.items(): setattr(self, k, v) class ConfigurationError(_Exception): """ Indicates a missing/faulty configuration value """ pass class ImplementationError(_Exception): """ Indicates implementation issues with a command or module """ pass class TransportError(_Exception): """ Indicates a communication error with the server """ pass class UsageError(_Exception): """ Indicates issues with the commandline usage (unknown command, unsupported argument, etc.) """ pass
py
1a49acae6877fdec35a67576c8e09a4fb1dc711c
from nltk.translate.bleu_score import sentence_bleu
from torch import nn


class Bleu(nn.Module):
    def forward(self, expected: str, actual: str) -> float:
        return float(sentence_bleu([expected], actual))
py
1a49acda5dd534221fbfb1ca0deb80f9eb7a2fd1
import tensorflow as tf import os import time from tqdm import tqdm from src.utils import get_cli_params, process_cli_params, \ order_param_settings from src.lva import build_graph, measure_smoothness, VERBOSE from src.train import evaluate_metric_list, update_decays, evaluate_metric import numpy as np def main(p): p = process_cli_params(p) global VERBOSE VERBOSE = p.verbose # ----------------------------- # Set GPU device to use os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(p.which_gpu) config = tf.ConfigProto() config.gpu_options.allow_growth = True # Set seeds np.random.seed(p.seed) tf.set_random_seed(p.seed) # Load data print("=== Loading Data ===") if p.dataset == 'svhn': from src.svhn import read_data_sets dataset = read_data_sets( "../../data/svhn/", n_labeled=p.num_labeled, validation_size=p.validation, one_hot=True, disjoint=False, downsample=True, download_and_extract=True ) else: from src.mnist import read_data_sets dataset = read_data_sets("MNIST_data", n_labeled=p.num_labeled, validation_size=p.validation, one_hot=True, disjoint=False) num_examples = dataset.train.num_examples p.num_examples = num_examples if p.validation > 0: dataset.test = dataset.validation p.iter_per_epoch = (num_examples // p.ul_batch_size) p.num_iter = p.iter_per_epoch * p.end_epoch # ----------------------------- # Build graph g, m, trainable_parameters = build_graph(p) # Collect losses train_losses = [m['loss'], m['cost'], m['uc'], m['vc']] test_losses = [m['cost']] aer = tf.constant(100.0) - m['acc'] if p.measure_smoothness: s = measure_smoothness(g, p) # print(s.get_shape()) train_losses.append(tf.reduce_mean(s)) if p.tb is not False: train_merged = tf.summary.merge([ tf.summary.scalar(x) for x in train_losses ] + [tf.summary.scalar(aer)]) test_merged = tf.summary.merge([ tf.summary.scalar(x) for x in test_losses ] + [tf.summary.scalar(aer)]) # Set up tensorboard logging if not os.path.exists(p.tb): os.makedirs(p.tb_dir) # ----------------------------- print("=== Starting Session ===") sess = tf.Session(config=config) i_iter = 0 # ----------------------------- id_seed_dir = p.id + "/" + "seed-{}".format(p.seed) + "/" # Write logs to appropriate directory log_dir = p.logdir + id_seed_dir if not os.path.exists(log_dir): os.makedirs(log_dir) desc_file = log_dir + "description" with open(desc_file, 'a') as f: print(*order_param_settings(p), sep='\n', file=f, flush=True) print("Trainable parameters:", trainable_parameters, file=f, flush=True) log_file = log_dir + "train_log" # Resume from checkpoint ckpt_dir = p.ckptdir + id_seed_dir ckpt = tf.train.get_checkpoint_state( ckpt_dir) # get latest checkpoint (if any) if ckpt and ckpt.model_checkpoint_path: # if checkpoint exists, # restore the parameters # and set epoch_n and i_iter g['saver'].restore(sess, ckpt.model_checkpoint_path) ep = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[1]) i_iter = (ep + 1) * p.iter_per_epoch print("Restored Epoch ", ep) else: # no checkpoint exists. # create checkpoints directory if it does not exist. 
if not os.path.exists(ckpt_dir): os.makedirs(ckpt_dir) init = tf.global_variables_initializer() sess.run(init) i_iter = 0 if p.tb is not False: train_writer = tf.summary.FileWriter(p.tb_dir + '/train', sess.graph) test_writer = tf.summary.FileWriter(p.tb_dir + '/test', sess.graph) # ----------------------------- print("=== Training ===") # ----------------------------- def eval_metrics(dataset, sess, ops): return evaluate_metric_list(dataset, sess, ops, graph=g, params=p) def eval_metric(dataset, sess, op): return evaluate_metric(dataset, sess, op, graph=g, params=p) # Evaluate initial training accuracy and losses # init_loss = evaluate_metric( # mnist.train.labeled_ds, sess, cost) with open(desc_file, 'a') as f: print('================================', file=f, flush=True) print("Initial Train AER: ", eval_metric(dataset.train.labeled_ds, sess, aer), "%", file=f, flush=True) # ----------------------------- # Evaluate initial testing accuracy and cross-entropy loss print("Initial Test AER: ", eval_metric(dataset.test, sess, aer), "%", file=f, flush=True) # print("Initial Test Losses: ", # *eval_metrics( # mnist.test, sess, test_losses), file=f, # flush=True) train_dict = {g['beta1']: p.beta1, g['lr']: p.initial_learning_rate} start = time.time() for i in range(i_iter, p.iter_per_epoch * p.end_epoch): images, labels = dataset.train.next_batch(p.batch_size, p.ul_batch_size) train_dict.update({ g['images']: images, g['labels']: labels, g['train_flag']: True}) _ = sess.run( [g['train_step']], feed_dict=train_dict) if (i > 1) and ((i + 1) % p.iter_per_epoch == 0): # Epoch complete? ep = i // p.iter_per_epoch # Update learning rate and momentum if ((ep + 1) >= p.decay_start_epoch) and ( ep % (p.lr_decay_frequency) == 0): # epoch_n + 1 because learning rate is set for next epoch ratio = 1.0 * (p.end_epoch - (ep + 1)) decay_epochs = p.end_epoch - p.decay_start_epoch ratio = max(0., ratio / decay_epochs) if decay_epochs != 0 else 1.0 train_dict[g['lr']] = (p.initial_learning_rate * ratio) train_dict[g['beta1']] = p.beta1_during_decay # For the last ten epochs, test every epoch if (ep + 1) > (p.end_epoch - 10): p.test_frequency_in_epochs = 1 # --------------------------------------------- # Evaluate every test_frequency_in_epochs if int((ep + 1) % p.test_frequency_in_epochs) == 0: now = time.time() - start if not p.do_not_save: g['saver'].save(sess, ckpt_dir + 'model.ckpt', ep) # --------------------------------------------- # Compute error on testing set (10k examples) test_aer_and_costs = \ eval_metrics(dataset.test, sess, [aer] + test_losses) train_aer = eval_metrics(dataset.train.labeled_ds, sess, [aer]) train_costs = sess.run(train_losses, feed_dict={g['images']: images, g['labels']: labels, g['train_flag']: False}) # Create log of: # time, epoch number, test accuracy, test cross entropy, # train accuracy, train loss, train cross entropy, # train reconstruction loss, smoothness log_i = [int(now), ep] + test_aer_and_costs + train_aer + \ train_costs with open(log_file, 'a') as train_log: print(*log_i, sep=',', flush=True, file=train_log) with open(desc_file, 'a') as f: final_aer = eval_metric(dataset.test, sess, aer) print("Final AER: ", final_aer, "%", file=f, flush=True) sess.close() if __name__ == '__main__': p = get_cli_params() main(p)
py
1a49ad59d8365879123000dfd56e5a1dc1a2dc28
import _plotly_utils.basevalidators


class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(self, plotly_name="ticktextsrc", parent_name="carpet.aaxis", **kwargs):
        super(TicktextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
py
1a49ae5ecab2baf4e8ed833dfde7cd0074a943ed
# Time:  O(|V| + |E|)
# Space: O(|V| + |E|)

import collections


class Solution(object):
    def possibleBipartition(self, N, dislikes):
        """
        :type N: int
        :type dislikes: List[List[int]]
        :rtype: bool
        """
        adj = [[] for _ in xrange(N)]
        for u, v in dislikes:
            adj[u-1].append(v-1)
            adj[v-1].append(u-1)
        color = [0]*N
        color[0] = 1
        q = collections.deque([0])
        while q:
            cur = q.popleft()
            for nei in adj[cur]:
                if color[nei] == color[cur]:
                    return False
                elif color[nei] == -color[cur]:
                    continue
                color[nei] = -color[cur]
                q.append(nei)
        return True
py
1a49af288dae036f3e5e2956a10fd9540d4767a5
from model.group import Group


def test_group_list(app, db):
    ui_list = app.group.get_group_list()

    def clean(group):
        return Group(id=group.id, name=group.name.strip())

    db_list = map(clean, db.get_group_list())
    assert sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
py
1a49b0263d63ea80755ad53e96d660f4b23077de
def process(uri: str):
    f = open(uri, 'r')
    stack = [int(x) for x in f.readline().replace('\n', '').strip().split('\t')]
    prev_sets = []
    steps = 0
    while str(stack) not in prev_sets:
        prev_sets.append(str(stack))
        maxb = max(stack)
        index = stack.index(maxb)
        stack[index] = 0
        for i in range(maxb):
            index = (index + 1) if index != len(stack) - 1 else 0
            stack[index] += 1
        steps += 1
    return steps - prev_sets.index(str(stack))


print(process('input_t'))
print(process('input'))
py
1a49b045d34fe884eff5b6977b076c0b80436c6f
#!/usr/bin/env python3
import requests


def pull_data(url, username, password, keyfile=None):
    # This function will extract records or media files from the SurveyCTO Rest API depending on the url provided
    # Include the keyfile parameter if your form or media file needs to be decrypted
    try:
        if keyfile == None:
            response = requests.get(
                url, auth=requests.auth.HTTPBasicAuth(username, password))
        else:
            files = {'private_key': open(keyfile, 'rb')}
            response = requests.post(
                url, files=files, auth=requests.auth.HTTPBasicAuth(username, password))
    except Exception as e:
        response = False
        print(e)
    return response


def save_media_file(file_bytes, file_name):
    f = open(file_name, 'wb')
    f.write(file_bytes)
    f.close()


def construct_url(form_id, servername):
    url = f'https://{servername}.surveycto.com/api/v2/forms/data/wide/json/{form_id}?date=0'
    return url


if __name__ == '__main__':

    # We'll demonstrate pulling data from an encrypted form and an unencrypted form
    # To pull data from an encrypted form we'll need the .pem file -- you should have downloaded this
    # from the SurveyCTO web console when adding encryption to your form

    # Here we'll provide details for our encrypted and unencrypted forms.
    # These will be used to construct the requests we send to the SurveyCTO API.
    form_config = [
        {
            'form_id': 'encrypted_test_form',
            'servername': 'mysctoservername',
            'username': 'mysctousername',
            'password': 'mysctopassword',
            'keyfile': 'path/to/encryption/keyfile.pem'
        },
        {
            'form_id': 'unencrypted_test_form',
            'servername': 'mysctoservername',
            'username': 'mysctousername',
            'password': 'mysctopassword',
        },
    ]

    #######################
    # Let's pull form records for the encrypted form
    form = form_config[0]
    url = construct_url(form['form_id'], form['servername'])

    # With the encryption key, we expect to see all the fields included in the response.
    # If we don't include the encryption key the API will only return the unencrypted fields.
    response = pull_data(url, form['username'], form['password'], form['keyfile'])
    print(response.json())

    # If the form contains media files we can download them using one of the url's returned in the form records
    url = 'url_to_media_file'
    response = pull_data(url, form['username'], form['password'], form['keyfile'])
    file_name = 'myfilename.png'  # Choose a filename for saving the media file
    # response.content contains the file bytes which we can save to disk or upload to a 3rd party service like S3
    save_media_file(response.content, file_name)

    #######################
    # Pulling data for the unencrypted form will be the exact same except we don't provide a keyfile for the pull_data function
    form = form_config[1]
    url = construct_url(form['form_id'], form['servername'])
    response = pull_data(url, form['username'], form['password'])
    print(response.json())

    # If the form contains media files we can download them using one of the url's returned in the form records
    url = 'url_to_media_file'
    response = pull_data(url, form['username'], form['password'])
    file_name = 'myfilename.png'  # Choose a filename for saving the media file
    # response.content contains the file bytes which we can save to disk or upload to a 3rd party service like S3
    save_media_file(response.content, file_name)
py
1a49b1888858b15956b3b297908c669ba90213e4
#!/usr/bin/python
import sys
from typing import Any, Dict

from requests.api import get
import semver
import requests

repo: str = 'groovy-guru'
owner: str = 'DontShaveTheYak'


def do_action(action, version):
    function = getattr(version, action)
    new_version = function()
    print(f'{version} {action} to {new_version}')
    return new_version


def get_response(url) -> Dict[str, Any]:
    response = requests.get(url)
    return response.json()


def get_action(pull_request: str) -> str:
    valid_labels = ['major', 'minor', 'patch']
    response = get_response(f"https://api.github.com/repos/{owner}/{repo}/pulls/{pull_request}")
    label = [label['name'] for label in response['labels'] if label['name'] in valid_labels][0]
    return label


def set_output(name: str, value: str):
    print(f"::set-output name={name}::{value}")


def get_latest_release() -> str:
    response = get_response(f"https://api.github.com/repos/{owner}/{repo}/releases")

    for release in response:
        if not release['draft'] and not release['prerelease']:
            return release

    raise Exception('Unable to find production release.')


latest_tag = sys.argv[1]
pull_request = sys.argv[2]
branch = sys.argv[3]

action_methods = {
    'patch': 'bump_patch',
    'minor': 'bump_minor',
    'major': 'bump_major'
}

if branch != "master":
    action_name = get_action(pull_request)
    action = action_methods[action_name]

next_version: str = ''

print(f'Latest tag is {latest_tag}')

latest_release = get_latest_release()
release_tag = latest_release['tag_name']

print(f'Latest release is {release_tag}')

if branch == 'master':
    print("This release is a final release!")
    base_tag = latest_tag.split("-")[0]
    bump_rule = "None"
    set_output('next_tag', base_tag)
    sys.exit(0)

if '-SNAPSHOT' in latest_tag:
    print('Checking if we can reuse latest tag.')
    latest_tag = latest_tag.split('-')[0]

    next_tag = semver.VersionInfo.parse(release_tag)
    next_tag = do_action(action, next_tag)

    latest_tag = semver.VersionInfo.parse(latest_tag)

    compare = semver.compare(str(latest_tag), str(next_tag))

    next_tag = f'{next_tag}-SNAPSHOT'
    latest_tag = f'{latest_tag}-SNAPSHOT'

    if compare == -1:
        print(f'Creating {next_tag} because its version is higher than latest tag: {latest_tag}')
        next_version = next_tag
    elif compare == 1:
        print(f'Reusing latest tag ({latest_tag}) because next tag ({next_tag}) is lower.')
        next_version = latest_tag
    else:
        print(f'Reusing latest tag ({latest_tag}) because its version is equal to next tag ({next_tag})')
        next_version = latest_tag
else:
    # create new snapshot tag and exit
    version = semver.VersionInfo.parse(latest_tag)
    new_tag = do_action(action, version)
    print(f'Creating new SNAPSHOT tag {new_tag}-SNAPSHOT')
    next_version = f'{new_tag}-SNAPSHOT'

set_output('next_tag', next_version)
py
1a49b1f2150dbe6d9c695d2c9100c12d73289567
# ----------------------------------------------------------------------------------------- # Code taken from https://github.com/iwantooxxoox/Keras-OpenFace (with minor modifications) # ----------------------------------------------------------------------------------------- import tensorflow as tf import numpy as np import os from numpy import genfromtxt from keras.layers import Conv2D, ZeroPadding2D, Activation from keras.layers.normalization import BatchNormalization _FLOATX = 'float32' def variable(value, dtype=_FLOATX, name=None): v = tf.Variable(np.asarray(value, dtype=dtype), name=name) _get_session().run(v.initializer) return v def shape(x): return x.get_shape() def square(x): return tf.square(x) def zeros(shape, dtype=_FLOATX, name=None): return variable(np.zeros(shape), dtype, name) def concatenate(tensors, axis=-1): if axis < 0: axis = axis % len(tensors[0].get_shape()) return tf.concat(axis, tensors) def LRN2D(x): return tf.nn.lrn(x, alpha=1e-4, beta=0.75) def conv2d_bn( x, layer=None, cv1_out=None, cv1_filter=(1, 1), cv1_strides=(1, 1), cv2_out=None, cv2_filter=(3, 3), cv2_strides=(1, 1), padding=None, ): num = '' if cv2_out == None else '1' tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, name=layer+'_conv'+num)(x) tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer+'_bn'+num)(tensor) tensor = Activation('relu')(tensor) if padding == None: return tensor tensor = ZeroPadding2D(padding=padding)(tensor) if cv2_out == None: return tensor tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, name=layer+'_conv'+'2')(tensor) tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer+'_bn'+'2')(tensor) tensor = Activation('relu')(tensor) return tensor weights = [ 'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3', 'inception_3a_1x1_conv', 'inception_3a_1x1_bn', 'inception_3a_pool_conv', 'inception_3a_pool_bn', 'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2', 'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2', 'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2', 'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2', 'inception_3b_pool_conv', 'inception_3b_pool_bn', 'inception_3b_1x1_conv', 'inception_3b_1x1_bn', 'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2', 'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2', 'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2', 'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2', 'inception_4a_pool_conv', 'inception_4a_pool_bn', 'inception_4a_1x1_conv', 'inception_4a_1x1_bn', 'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2', 'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2', 'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2', 'inception_5a_pool_conv', 'inception_5a_pool_bn', 'inception_5a_1x1_conv', 'inception_5a_1x1_bn', 'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2', 'inception_5b_pool_conv', 'inception_5b_pool_bn', 'inception_5b_1x1_conv', 'inception_5b_1x1_bn', 'dense_layer' ] conv_shape = { 'conv1': [64, 3, 7, 7], 'conv2': [64, 64, 1, 1], 'conv3': 
[192, 64, 3, 3], 'inception_3a_1x1_conv': [64, 192, 1, 1], 'inception_3a_pool_conv': [32, 192, 1, 1], 'inception_3a_5x5_conv1': [16, 192, 1, 1], 'inception_3a_5x5_conv2': [32, 16, 5, 5], 'inception_3a_3x3_conv1': [96, 192, 1, 1], 'inception_3a_3x3_conv2': [128, 96, 3, 3], 'inception_3b_3x3_conv1': [96, 256, 1, 1], 'inception_3b_3x3_conv2': [128, 96, 3, 3], 'inception_3b_5x5_conv1': [32, 256, 1, 1], 'inception_3b_5x5_conv2': [64, 32, 5, 5], 'inception_3b_pool_conv': [64, 256, 1, 1], 'inception_3b_1x1_conv': [64, 256, 1, 1], 'inception_3c_3x3_conv1': [128, 320, 1, 1], 'inception_3c_3x3_conv2': [256, 128, 3, 3], 'inception_3c_5x5_conv1': [32, 320, 1, 1], 'inception_3c_5x5_conv2': [64, 32, 5, 5], 'inception_4a_3x3_conv1': [96, 640, 1, 1], 'inception_4a_3x3_conv2': [192, 96, 3, 3], 'inception_4a_5x5_conv1': [32, 640, 1, 1,], 'inception_4a_5x5_conv2': [64, 32, 5, 5], 'inception_4a_pool_conv': [128, 640, 1, 1], 'inception_4a_1x1_conv': [256, 640, 1, 1], 'inception_4e_3x3_conv1': [160, 640, 1, 1], 'inception_4e_3x3_conv2': [256, 160, 3, 3], 'inception_4e_5x5_conv1': [64, 640, 1, 1], 'inception_4e_5x5_conv2': [128, 64, 5, 5], 'inception_5a_3x3_conv1': [96, 1024, 1, 1], 'inception_5a_3x3_conv2': [384, 96, 3, 3], 'inception_5a_pool_conv': [96, 1024, 1, 1], 'inception_5a_1x1_conv': [256, 1024, 1, 1], 'inception_5b_3x3_conv1': [96, 736, 1, 1], 'inception_5b_3x3_conv2': [384, 96, 3, 3], 'inception_5b_pool_conv': [96, 736, 1, 1], 'inception_5b_1x1_conv': [256, 736, 1, 1], } def load_weights(): weightsDir = './weights' fileNames = filter(lambda f: not f.startswith('.'), os.listdir(weightsDir)) paths = {} weights_dict = {} for n in fileNames: paths[n.replace('.csv', '')] = weightsDir + '/' + n for name in weights: if 'conv' in name: conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None) conv_w = np.reshape(conv_w, conv_shape[name]) conv_w = np.transpose(conv_w, (2, 3, 1, 0)) conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None) weights_dict[name] = [conv_w, conv_b] elif 'bn' in name: bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None) bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None) bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None) bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None) weights_dict[name] = [bn_w, bn_b, bn_m, bn_v] elif 'dense' in name: dense_w = genfromtxt(weightsDir+'/dense_w.csv', delimiter=',', dtype=None) dense_w = np.reshape(dense_w, (128, 736)) dense_w = np.transpose(dense_w, (1, 0)) dense_b = genfromtxt(weightsDir+'/dense_b.csv', delimiter=',', dtype=None) weights_dict[name] = [dense_w, dense_b] return weights_dict
py
1a49b27a80e0949531ab73c1a8fc99a575a178d5
from setuptools import setup, find_packages
from os import path
from io import open

here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='shconfparser',
    version='2.2.3',
    description="It's a Network configuration parser, which translates the show outputs",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/network-tools/shconfparser',
    author='Kiran Kumar Kotari',
    author_email='[email protected]',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='network conf parser translator cisco show output parser',
    packages=find_packages(exclude=['tests', 'data', 'asserts']),
)
py
1a49b2b4cb2727bc13c8f00ab7c80d75c805a6a1
import jax.numpy as jnp
import numpy as np

import netket as nk
import flax.linen as nn


class test(nn.Module):
    @nn.compact
    def __call__(self, x):
        nothing = self.param("nothing", lambda *args: jnp.ones(1))
        if len(x.shape) != 1:
            return jnp.array(x.size * [1.0])
        return 1.0


class test2(nn.Module):
    @nn.compact
    def __call__(self, x):
        nothing = self.param("nothing", lambda *args: jnp.ones(1))
        sol = jnp.sum(nothing ** 2 * x, axis=-1)
        return sol


# continuous preparations
def v1(x):
    return 1 / jnp.sqrt(2 * jnp.pi) * jnp.sum(jnp.exp(-0.5 * ((x - 2.5) ** 2)), axis=-1)


def v2(x):
    return 1 / jnp.sqrt(2 * jnp.pi) * jnp.sum(jnp.exp(-0.5 * ((x - 2.5) ** 2)), axis=-1)


hilb = nk.hilbert.Particle(N=1, L=5, pbc=True)
pot = nk.operator.PotentialEnergy(hilb, v1)
kin = nk.operator.KineticEnergy(hilb, mass=1.0)
e = pot + kin

sab = nk.sampler.MetropolisGaussian(hilb, sigma=1.0, n_chains=16, n_sweeps=1)

model = test()
model2 = test2()

vs_continuous = nk.vqs.MCState(sab, model, n_samples=10 ** 6, n_discard=2000)
vs_continuous2 = nk.vqs.MCState(sab, model2, n_samples=10 ** 7, n_discard=2000)


def test_expect():
    x = vs_continuous2.samples.reshape(-1, 1)
    sol = vs_continuous.expect(pot)
    O_stat, O_grad = vs_continuous2.expect_and_grad(e)

    O_grad, _ = nk.jax.tree_ravel(O_grad)
    O_grad_exact = 2 * jnp.dot(x.T, (v1(x) - jnp.mean(v1(x), axis=0))) / x.shape[0]

    r"""
    :math:`<V> = \int_0^5 dx V(x) |\psi(x)|^2 / \int_0^5 |\psi(x)|^2 = 0.1975164 (\psi = 1)`
    :math:`<\nabla V> = \nabla_p \int_0^5 dx V(x) |\psi(x)|^2 / \int_0^5 |\psi(x)|^2 = -0.140256 (\psi = \exp(p^2 x))`
    """
    np.testing.assert_allclose(0.1975164, sol.mean, atol=10 ** (-3))
    np.testing.assert_allclose(-0.140256, 2 * O_grad, atol=10 ** (-3))
bzl
1a49b2ca1471f4976d197ee3cbfc015998e4f42a
# pylint: disable=g-bad-file-header # Copyright 2016 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuring the C++ toolchain on Windows.""" load( "@bazel_tools//tools/cpp:lib_cc_configure.bzl", "escape_string", "auto_configure_fail", "auto_configure_warning", "get_env_var", "which", "which_cmd", "execute", "tpl", "is_cc_configure_debug", ) def _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = False): """Return the content of msys crosstool which is still the default CROSSTOOL on Windows.""" bazel_sh = get_env_var(repository_ctx, "BAZEL_SH").replace("\\", "/").lower() tokens = bazel_sh.rsplit("/", 1) prefix = "mingw64" if use_mingw else "usr" msys_root = None if tokens[0].endswith("/usr/bin"): msys_root = tokens[0][:len(tokens[0]) - len("usr/bin")] elif tokens[0].endswith("/bin"): msys_root = tokens[0][:len(tokens[0]) - len("bin")] if not msys_root: auto_configure_fail( "Could not determine MSYS/Cygwin root from BAZEL_SH (%s)" % bazel_sh) escaped_msys_root = escape_string(msys_root) return ((( ' abi_version: "local"\n' + ' abi_libc_version: "local"\n' + ' builtin_sysroot: ""\n' + ' compiler: "msys-gcc"\n' + ' host_system_name: "local"\n' + ' needsPic: false\n' + ' target_libc: "msys"\n' + ' target_cpu: "x64_windows"\n' + ' target_system_name: "local"\n') if not use_mingw else '') + ' tool_path { name: "ar" path: "%s%s/bin/ar" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "compat-ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "cpp" path: "%s%s/bin/cpp" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "dwp" path: "%s%s/bin/dwp" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "gcc" path: "%s%s/bin/gcc" }\n' % (escaped_msys_root, prefix) + ' cxx_flag: "-std=gnu++0x"\n' + ' linker_flag: "-lstdc++"\n' + ' cxx_builtin_include_directory: "%s%s/"\n' % (escaped_msys_root, prefix) + ' tool_path { name: "gcov" path: "%s%s/bin/gcov" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "nm" path: "%s%s/bin/nm" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "objcopy" path: "%s%s/bin/objcopy" }\n' % (escaped_msys_root, prefix) + ' objcopy_embed_flag: "-I"\n' + ' objcopy_embed_flag: "binary"\n' + ' tool_path { name: "objdump" path: "%s%s/bin/objdump" }\n' % (escaped_msys_root, prefix) + ' tool_path { name: "strip" path: "%s%s/bin/strip" }'% (escaped_msys_root, prefix) + ' feature { name: "targets_windows" implies: "copy_dynamic_libraries_to_binary" enabled: true }' + ' feature { name: "copy_dynamic_libraries_to_binary" }' ) def _get_system_root(repository_ctx): r"""Get System root path on Windows, default is C:\\Windows. 
Doesn't %-escape the result.""" if "SYSTEMROOT" in repository_ctx.os.environ: return escape_string(repository_ctx.os.environ["SYSTEMROOT"]) auto_configure_warning("SYSTEMROOT is not set, using default SYSTEMROOT=C:\\Windows") return "C:\\Windows" def _find_cuda(repository_ctx): """Find out if and where cuda is installed. Doesn't %-escape the result.""" if "CUDA_PATH" in repository_ctx.os.environ: return repository_ctx.os.environ["CUDA_PATH"] nvcc = which(repository_ctx, "nvcc.exe") if nvcc: return nvcc[:-len("/bin/nvcc.exe")] return None def _find_python(repository_ctx): """Find where is python on Windows. Doesn't %-escape the result.""" if "BAZEL_PYTHON" in repository_ctx.os.environ: python_binary = repository_ctx.os.environ["BAZEL_PYTHON"] if not python_binary.endswith(".exe"): python_binary = python_binary + ".exe" return python_binary auto_configure_warning("'BAZEL_PYTHON' is not set, start looking for python in PATH.") python_binary = which_cmd(repository_ctx, "python.exe") auto_configure_warning("Python found at %s" % python_binary) return python_binary def _add_system_root(repository_ctx, env): r"""Running VCVARSALL.BAT and VCVARSQUERYREGISTRY.BAT need %SYSTEMROOT%\\system32 in PATH.""" if "PATH" not in env: env["PATH"] = "" env["PATH"] = env["PATH"] + ";" + _get_system_root(repository_ctx) + "\\system32" return env def find_vc_path(repository_ctx): """Find Visual C++ build tools install path. Doesn't %-escape the result.""" # 1. Check if BAZEL_VC or BAZEL_VS is already set by user. if "BAZEL_VC" in repository_ctx.os.environ: return repository_ctx.os.environ["BAZEL_VC"] if "BAZEL_VS" in repository_ctx.os.environ: return repository_ctx.os.environ["BAZEL_VS"] + "\\VC\\" auto_configure_warning("'BAZEL_VC' is not set, " + "start looking for the latest Visual C++ installed.") # 2. Check if VS%VS_VERSION%COMNTOOLS is set, if true then try to find and use # vcvarsqueryregistry.bat to detect VC++. auto_configure_warning("Looking for VS%VERSION%COMNTOOLS environment variables, " + "eg. VS140COMNTOOLS") for vscommontools_env in ["VS140COMNTOOLS", "VS120COMNTOOLS", "VS110COMNTOOLS", "VS100COMNTOOLS", "VS90COMNTOOLS"]: if vscommontools_env not in repository_ctx.os.environ: continue vcvarsqueryregistry = repository_ctx.os.environ[vscommontools_env] + "\\vcvarsqueryregistry.bat" if not repository_ctx.path(vcvarsqueryregistry).exists: continue repository_ctx.file("get_vc_dir.bat", "@echo off\n" + "call \"" + vcvarsqueryregistry + "\"\n" + "echo %VCINSTALLDIR%", True) env = _add_system_root(repository_ctx, repository_ctx.os.environ) vc_dir = execute(repository_ctx, ["./get_vc_dir.bat"], environment=env) auto_configure_warning("Visual C++ build tools found at %s" % vc_dir) return vc_dir # 3. User might clean up all environment variables, if so looking for Visual C++ through registry. # Works for all VS versions, including Visual Studio 2017. 
auto_configure_warning("Looking for Visual C++ through registry") reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe" vc_dir = None for key, suffix in (("VC7", ""), ("VS7", "\\VC")): for version in ["15.0", "14.0", "12.0", "11.0", "10.0", "9.0", "8.0"]: if vc_dir: break result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\" + key, "/v", version]) if is_cc_configure_debug(repository_ctx): auto_configure_warning("registry query result for VC %s:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" % (version, result.stdout, result.stderr)) if not result.stderr: for line in result.stdout.split("\n"): line = line.strip() if line.startswith(version) and line.find("REG_SZ") != -1: vc_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip() + suffix if not vc_dir: return None auto_configure_warning("Visual C++ build tools found at %s" % vc_dir) return vc_dir def _is_vs_2017(vc_path): """Check if the installed VS version is Visual Studio 2017.""" # In VS 2017, the location of VC is like: # C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\ # In VS 2015 or older version, it is like: # C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\ return vc_path.find("2017") != -1 def _find_vcvarsall_bat_script(repository_ctx, vc_path): """Find vcvarsall.bat script. Doesn't %-escape the result.""" if _is_vs_2017(vc_path): vcvarsall = vc_path + "\\Auxiliary\\Build\\VCVARSALL.BAT" else: vcvarsall = vc_path + "\\VCVARSALL.BAT" if not repository_ctx.path(vcvarsall).exists: return None return vcvarsall def _find_env_vars(repository_ctx, vc_path): """Get environment variables set by VCVARSALL.BAT. Doesn't %-escape the result!""" vcvarsall = _find_vcvarsall_bat_script(repository_ctx, vc_path) repository_ctx.file("get_env.bat", "@echo off\n" + "call \"" + vcvarsall + "\" amd64 > NUL \n" + "echo PATH=%PATH%,INCLUDE=%INCLUDE%,LIB=%LIB% \n", True) env = _add_system_root(repository_ctx, {"PATH": "", "INCLUDE": "", "LIB": ""}) envs = execute(repository_ctx, ["./get_env.bat"], environment=env).split(",") env_map = {} for env in envs: key, value = env.split("=", 1) env_map[key] = escape_string(value.replace("\\", "\\\\")) return env_map def find_msvc_tool(repository_ctx, vc_path, tool): """Find the exact path of a specific build tool in MSVC. Doesn't %-escape the result.""" tool_path = "" if _is_vs_2017(vc_path): # For VS 2017, the tools are under a directory like: # C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Tools\MSVC\14.10.24930\bin\HostX64\x64 dirs = repository_ctx.path(vc_path + "\\Tools\\MSVC").readdir() if len(dirs) < 1: return None # Normally there should be only one child directory under %VC_PATH%\TOOLS\MSVC, # but iterate every directory to be more robust. 
for path in dirs: tool_path = str(path) + "\\bin\\HostX64\\x64\\" + tool if repository_ctx.path(tool_path).exists: break else: # For VS 2015 and older version, the tools are under: # C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64 tool_path = vc_path + "\\bin\\amd64\\" + tool if not repository_ctx.path(tool_path).exists: return None return tool_path def _find_missing_vc_tools(repository_ctx, vc_path): """Check if any required tool is missing under given VC path.""" missing_tools = [] if not _find_vcvarsall_bat_script(repository_ctx, vc_path): missing_tools.append("VCVARSALL.BAT") for tool in ["cl.exe", "link.exe", "lib.exe", "ml64.exe"]: if not find_msvc_tool(repository_ctx, vc_path, tool): missing_tools.append(tool) return missing_tools def _is_support_whole_archive(repository_ctx, vc_path): """Run MSVC linker alone to see if it supports /WHOLEARCHIVE.""" env = repository_ctx.os.environ if "NO_WHOLE_ARCHIVE_OPTION" in env and env["NO_WHOLE_ARCHIVE_OPTION"] == "1": return False linker = find_msvc_tool(repository_ctx, vc_path, "link.exe") result = execute(repository_ctx, [linker], expect_failure = True) return result.find("/WHOLEARCHIVE") != -1 def _is_support_debug_fastlink(repository_ctx, vc_path): """Run MSVC linker alone to see if it supports /DEBUG:FASTLINK.""" linker = find_msvc_tool(repository_ctx, vc_path, "link.exe") result = execute(repository_ctx, [linker], expect_failure = True) return result.find("/DEBUG[:{FASTLINK|FULL|NONE}]") != -1 def _is_use_msvc_wrapper(repository_ctx): """Returns True if USE_MSVC_WRAPPER is set to 1.""" env = repository_ctx.os.environ return "USE_MSVC_WRAPPER" in env and env["USE_MSVC_WRAPPER"] == "1" def _get_compilation_mode_content(): """Return the content for adding flags for different compilation modes when using MSVC wrapper.""" return "\n".join([ " compilation_mode_flags {", " mode: DBG", " compiler_flag: '-Xcompilation-mode=dbg'", " linker_flag: '-Xcompilation-mode=dbg'", " }", " compilation_mode_flags {", " mode: FASTBUILD", " compiler_flag: '-Xcompilation-mode=fastbuild'", " linker_flag: '-Xcompilation-mode=fastbuild'", " }", " compilation_mode_flags {", " mode: OPT", " compiler_flag: '-Xcompilation-mode=opt'", " linker_flag: '-Xcompilation-mode=opt'", " }"]) def _escaped_cuda_compute_capabilities(repository_ctx): """Returns a %-escaped list of strings representing cuda compute capabilities.""" if "CUDA_COMPUTE_CAPABILITIES" not in repository_ctx.os.environ: return ["3.5", "5.2"] capabilities_str = escape_string(repository_ctx.os.environ["CUDA_COMPUTE_CAPABILITIES"]) capabilities = capabilities_str.split(",") for capability in capabilities: # Workaround for Skylark's lack of support for regex. 
This check should # be equivalent to checking: # if re.match("[0-9]+.[0-9]+", capability) == None: parts = capability.split(".") if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit(): auto_configure_fail("Invalid compute capability: %s" % capability) return capabilities def configure_windows_toolchain(repository_ctx): """Configure C++ toolchain on Windows.""" repository_ctx.symlink(Label("@bazel_tools//tools/cpp:BUILD.static"), "BUILD") vc_path = find_vc_path(repository_ctx) missing_tools = None vc_installation_error_script = "vc_installation_error.bat" if not vc_path: tpl(repository_ctx, vc_installation_error_script, {"%{vc_error_message}" : ""}) else: missing_tools = _find_missing_vc_tools(repository_ctx, vc_path) if missing_tools: tpl(repository_ctx, vc_installation_error_script, { "%{vc_error_message}" : "\r\n".join([ "echo. 1>&2", "echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path, "echo But Bazel can't find the following tools: 1>&2", "echo %s 1>&2" % ", ".join(missing_tools), "echo. 1>&2", ])}) if not vc_path or missing_tools: tpl(repository_ctx, "CROSSTOOL", { "%{cpu}": "x64_windows", "%{default_toolchain_name}": "msvc_x64", "%{toolchain_name}": "msys_x64", "%{msvc_env_tmp}": "", "%{msvc_env_path}": "", "%{msvc_env_include}": "", "%{msvc_env_lib}": "", "%{msvc_cl_path}": vc_installation_error_script, "%{msvc_ml_path}": vc_installation_error_script, "%{msvc_link_path}": vc_installation_error_script, "%{msvc_lib_path}": vc_installation_error_script, "%{dbg_mode_debug}": "/DEBUG", "%{fastbuild_mode_debug}": "/DEBUG", "%{compilation_mode_content}": "", "%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx), "%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True), "%{opt_content}": "", "%{dbg_content}": "", "%{link_content}": "", "%{cxx_builtin_include_directory}": "", "%{coverage}": "", }) return env = _find_env_vars(repository_ctx, vc_path) escaped_paths = escape_string(env["PATH"]) escaped_include_paths = escape_string(env["INCLUDE"]) escaped_lib_paths = escape_string(env["LIB"]) escaped_tmp_dir = escape_string( get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\")) msvc_cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/") msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace("\\", "/") msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace("\\", "/") msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace("\\", "/") escaped_cxx_include_directories = [] compilation_mode_content = "" if _is_use_msvc_wrapper(repository_ctx): if _is_support_whole_archive(repository_ctx, vc_path): support_whole_archive = "True" else: support_whole_archive = "False" nvcc_tmp_dir_name = escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir" # Make sure nvcc.exe is in PATH cuda_path = _find_cuda(repository_ctx) if cuda_path: escaped_paths = escape_string(cuda_path.replace("\\", "\\\\") + "/bin;") + escaped_paths escaped_compute_capabilities = _escaped_cuda_compute_capabilities(repository_ctx) tpl(repository_ctx, "wrapper/bin/pydir/msvc_tools.py", { "%{lib_tool}": escape_string(msvc_lib_path), "%{support_whole_archive}": support_whole_archive, "%{cuda_compute_capabilities}": ", ".join( ["\"%s\"" % c for c in escaped_compute_capabilities]), "%{nvcc_tmp_dir_name}": nvcc_tmp_dir_name, }) # nvcc will generate some source files under %{nvcc_tmp_dir_name} # The generated files are guranteed to have 
unique name, so they can share the same tmp directory escaped_cxx_include_directories += [ "cxx_builtin_include_directory: \"%s\"" % nvcc_tmp_dir_name ] msvc_wrapper = repository_ctx.path(Label("@bazel_tools//tools/cpp:CROSSTOOL")).dirname.get_child("wrapper").get_child("bin") for f in ["msvc_cl.bat", "msvc_link.bat", "msvc_nop.bat"]: repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/" + f) msvc_wrapper = msvc_wrapper.get_child("pydir") for f in ["msvc_cl.py", "msvc_link.py"]: repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/pydir/" + f) python_binary = _find_python(repository_ctx) tpl(repository_ctx, "wrapper/bin/call_python.bat", {"%{python_binary}": escape_string(python_binary)}) msvc_cl_path = "wrapper/bin/msvc_cl.bat" msvc_link_path = "wrapper/bin/msvc_link.bat" msvc_lib_path = "wrapper/bin/msvc_link.bat" compilation_mode_content = _get_compilation_mode_content() for path in escaped_include_paths.split(";"): if path: escaped_cxx_include_directories.append("cxx_builtin_include_directory: \"%s\"" % path) support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, vc_path) tpl(repository_ctx, "CROSSTOOL", { "%{cpu}": "x64_windows", "%{default_toolchain_name}": "msvc_x64", "%{toolchain_name}": "msys_x64", "%{msvc_env_tmp}": escaped_tmp_dir, "%{msvc_env_path}": escaped_paths, "%{msvc_env_include}": escaped_include_paths, "%{msvc_env_lib}": escaped_lib_paths, "%{msvc_cl_path}": msvc_cl_path, "%{msvc_ml_path}": msvc_ml_path, "%{msvc_link_path}": msvc_link_path, "%{msvc_lib_path}": msvc_lib_path, "%{dbg_mode_debug}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG", "%{fastbuild_mode_debug}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG", "%{compilation_mode_content}": compilation_mode_content, "%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx), "%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True), "%{opt_content}": "", "%{dbg_content}": "", "%{link_content}": "", "%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories), "%{coverage}": "", })
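The compute-capability check in the toolchain file above works around Starlark's missing regex support by splitting on "." and calling isdigit(). Purely as a standalone illustration (the function name below is hypothetical and not part of the Bazel toolchain), an equivalent check in ordinary Python could look like this:

import re

def validate_compute_capabilities(raw):
    # Split a comma-separated list such as "3.5,5.2" and reject anything that
    # is not of the form <digits>.<digits>, mirroring the Starlark logic.
    capabilities = raw.split(",")
    for capability in capabilities:
        if re.fullmatch(r"[0-9]+\.[0-9]+", capability) is None:
            raise ValueError("Invalid compute capability: %s" % capability)
    return capabilities

print(validate_compute_capabilities("3.5,5.2"))   # ['3.5', '5.2']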
py
1a49b33d2f5e13a06ce11cefed7f8868d5269300
""" Commands for home spaces/rooms. """ from evennia import CmdSet from commands.base import ArxCommand from django.conf import settings from world.dominion.models import LIFESTYLES from django.db.models import Q from evennia.objects.models import ObjectDB from world.dominion.models import AssetOwner, Organization, CraftingRecipe from commands.base_commands.crafting import CmdCraft from commands.base_commands.overrides import CmdDig from server.utils.prettytable import PrettyTable from server.utils.arx_utils import inform_staff, raw from evennia.utils import utils from evennia.utils.evtable import EvTable from typeclasses.characters import Character import re # error return function, needed by Extended Look command AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1)) DESC_COST = 0 class HomeCmdSet(CmdSet): """CmdSet for a home spaces.""" key = "HomeCmdSet" priority = 101 duplicates = False no_exits = False no_objs = False def at_cmdset_creation(self): """ This is the only method defined in a cmdset, called during its creation. It should populate the set with command instances. Note that it can also take other cmdsets as arguments, which will be used by the character default cmdset to add all of these onto the internal cmdset stack. They will then be able to removed or replaced as needed. """ self.add(CmdManageHome()) class CmdManageHome(ArxCommand): """ +home Usage: +home +home/lock +home/unlock +home/key <character> +home/passmsg <message people see when entering> +home/lockmsg <message those who can't enter see> +home/rmkey <character> +home/lifestyle <rating> Controls your home. /passmsg is for use of the 'pass' command to go through a locked door. /lockmsg is for those who are denied entry. /lifestyle is to control how much silver you spend per week and earn prestige. """ key = "+home" # aliases = ["@home"] locks = "cmd:all()" help_category = "Home" def display_lifestyles(self): """Displays table of Dominion lifestyles with the character's current selection""" caller = self.caller table = PrettyTable(["{wRating{n", "{wCost{n", "{wPrestige{n"]) caller.msg("{wLifestyles:{n") for rating in LIFESTYLES: num = str(rating) if caller.player_ob.Dominion.lifestyle_rating == rating: num += '{w*{n' table.add_row([num, LIFESTYLES[rating][0], LIFESTYLES[rating][1]]) caller.msg(str(table), options={'box': True}) def func(self): """Execute command.""" caller = self.caller loc = caller.location entrances = loc.entrances owners = loc.db.owners or [] keylist = loc.db.keylist or [] if caller not in owners and not caller.check_permstring("builders"): caller.msg("You are not the owner of this room.") return if not self.args and not self.switches: locked = "{rlocked{n" if loc.db.locked else "{wunlocked{n" caller.msg("Your home is currently %s." 
% locked) caller.msg("{wOwners:{n %s" % ", ".join(str(ob) for ob in owners)) caller.msg("{wCharacters who have keys:{n %s" % ", ".join(str(ob) for ob in keylist)) entrance = entrances[0] entmsg = entrance.db.success_traverse or "" errmsg = entrance.db.err_traverse or "" caller.msg("{wMessage upon passing through locked door:{n %s" % entmsg) caller.msg("{wMessage upon being denied access:{n %s" % errmsg) return if "unlock" in self.switches: # we only show as locked if -all- entrances are locked for ent in entrances: ent.unlock_exit() loc.db.locked = False caller.msg("Your house is now unlocked.") return if "lock" in self.switches: loc.db.locked = True caller.msg("Your house is now locked.") for ent in entrances: ent.lock_exit() return if "lifestyle" in self.switches and not self.args: # list lifestyles self.display_lifestyles() return if not self.args: caller.msg("You must provide an argument to the command.") return if "lockmsg" in self.switches: for r_exit in entrances: r_exit.db.err_traverse = self.args caller.msg("{wThe message those who can't enter now see is{n: %s" % self.args) return if "passmsg" in self.switches: for r_exit in entrances: r_exit.db.success_traverse = self.args caller.msg("{wThe message those who enter will now see is{n: %s" % self.args) return if "lifestyle" in self.switches or "lifestyles" in self.switches: if caller not in owners: caller.msg("You may only set the lifestyle rating for an owner.") return try: LIFESTYLES[int(self.args)] except (KeyError, TypeError, ValueError): caller.msg("%s is not a valid lifestyle." % self.args) self.display_lifestyles() return caller.player_ob.Dominion.lifestyle_rating = int(self.args) caller.player_ob.Dominion.save() caller.msg("Your lifestyle rating has been set to %s." % self.args) return player = caller.player.search(self.lhs) if not player: return char = player.char_ob if not char: caller.msg("No character found.") return keys = char.db.keylist or [] if "key" in self.switches: if loc in keys and char in keylist: caller.msg("They already have a key to here.") return if loc not in keys: keys.append(loc) char.db.keylist = keys if char not in keylist: keylist.append(char) loc.db.keylist = keylist char.msg("{c%s{w has granted you a key to %s." % (caller, loc)) caller.msg("{wYou have granted {c%s{w a key.{n" % char) return if "rmkey" in self.switches: if loc not in keys and char not in keylist: caller.msg("They don't have a key to here.") return if loc in keys: keys.remove(loc) char.db.keylist = keys if char in keylist: keylist.remove(char) loc.db.keylist = keylist char.msg("{c%s{w has removed your access to %s." % (caller, loc)) caller.msg("{wYou have removed {c%s{w's key.{n" % char) return class CmdAllowBuilding(ArxCommand): """ @allowbuilding Usage: @allowbuilding @allowbuilding all[=<cost>] @allowbuilding <name>[,<name2>,...][=<cost>] @allowbuilding/clear Flags your current room as permitting characters to build there. The name provided can either be a character or organization name. Cost is 100 economic resources unless specified otherwise. Max rooms that anyone can build off here is set by the 'expansion_cap' attribute, defaults to 1 if not defined. Tracked separately for each org/player, so any number of people could build 1 room off a room with expansion_cap of 1 in a room, as long as they are permitted to do so. 
""" key = "@allowbuilding" locks = "cmd:perm(Builders)" help_category = "Building" def func(self): """Execute command.""" caller = self.caller loc = caller.location permits = loc.db.permitted_builders or {} if not self.args and not self.switches: table = PrettyTable(["Name", "Cost"]) for permit_id in permits: if permit_id == "all": owner = "all" else: owner = AssetOwner.objects.get(id=permit_id) cost = permits[permit_id] table.add_row([str(owner), cost]) caller.msg(str(table)) return if "clear" in self.switches: loc.db.permitted_builders = {} caller.msg("Perms wiped.") return cost = self.rhs and int(self.rhs) or 100 for name in self.lhslist: if name == "all": permits["all"] = cost continue try: owner = AssetOwner.objects.get(Q(organization_owner__name__iexact=name) | Q(player__player__username__iexact=name)) except AssetOwner.DoesNotExist: caller.msg("No owner by name of %s." % name) continue permits[owner.id] = cost loc.db.permitted_builders = permits caller.msg("Perms set.") return class CmdBuildRoom(CmdDig): """ +buildroom - build and connect new rooms to the current one Usage: +buildroom roomname=exit_to_there[;alias], exit_to_here[;alias] +buildroom/org orgname/roomname=[exits] Examples: +buildroom kitchen = north;n, south;s +buildroom sheer cliff= climb up, climb down +buildroom/org velenosa/dungeon=door;d, out;o This command is a convenient way to build rooms quickly; it creates the new room and you can optionally set up exits back and forth between your current room and the new one. You can add as many aliases as you like to the name of the room and the exits in question; an example would be 'north;no;n'. """ key = "+buildroom" locks = "cmd:all()" help_category = "Home" help_entry_tags = ["housing"] # noinspection PyAttributeOutsideInit def func(self): """Do the digging. Inherits variables from ObjManipCommand.parse()""" caller = self.caller loc = caller.location # lots of checks and shit here permits = loc.db.permitted_builders or {} if not permits: caller.msg("No one is currently allowed to build a house from here.") return expansions = loc.db.expansions or {} max_expansions = loc.db.expansion_cap or 20 assets = None # base cost = 1000 dompc = caller.player_ob.Dominion if "org" in self.switches: # max_rooms = 100 try: largs = self.lhs.split("/") orgname = largs[0] roomname = largs[1] except IndexError: caller.msg("Please specify orgname/roomname.") return try: org = Organization.objects.get(Q(name__iexact=orgname) & Q(members__player=dompc) & Q(members__deguilded=False)) if not org.access(caller, 'build'): caller.msg("You are not permitted to build for this org.") return self.lhs = roomname self.lhslist = [roomname] self.args = "%s=%s" % (self.lhs, self.rhs) # fix args for CmdDig self.parse() assets = org.assets cost = permits[assets.id] except KeyError: if "all" not in permits: caller.msg("That org is not permitted to build here.") return cost = permits["all"] except Organization.DoesNotExist: caller.msg("No org by that name: %s." % orgname) return else: # max_rooms = 3 assets = dompc.assets if assets.id in permits: cost = permits[assets.id] else: if "all" not in permits: caller.msg("You are not allowed to build here.") return cost = permits["all"] try: if expansions.get(assets.id, 0) >= max_expansions: caller.msg("You have built as many rooms from this space as you are allowed.") return except (AttributeError, TypeError, ValueError): caller.msg("{rError logged.{n") inform_staff("Room %s has an invalid expansions attribute." 
% loc.id) return if not self.lhs: caller.msg("The cost for you to build from this room is %s." % cost) return if cost > assets.economic: noun = "you" if dompc.assets == assets else str(assets) caller.msg("It would cost %s %s to build here, but only have %s." % (noun, cost, assets.economic)) if noun != "you": caller.msg("Deposit resources into the account of %s." % noun) return tagname = "%s_owned_room" % str(assets) # because who fucking cares # if tagname not in loc.tags.all() and ( # ObjectDB.objects.filter(Q(db_typeclass_path=settings.BASE_ROOM_TYPECLASS) # & Q(db_tags__db_key__iexact=tagname) # ).count() > max_rooms): # caller.msg("You have as many rooms as you are allowed.") # return if not self.rhs or len(self.rhslist) < 2: caller.msg("You must specify an exit and return exit for the new room.") return if not re.findall('^[\-\w\'{\[,%;|# ]+$', self.lhs) or not re.findall('^[\-\w\'{\[,%;|<># ]+$', self.rhs): caller.msg("Invalid characters entered for names or exits.") return new_room = CmdDig.func(self) if not new_room: return assets.economic -= cost assets.save() # do setup shit for new room here new_room.db.room_owner = assets.id new_room.tags.add("player_made_room") new_room.tags.add(tagname) new_room.tags.add("private") new_room.db.expansion_cap = 20 new_room.db.expansions = {} new_room.db.cost_increase_per_expansion = 25 cost_increase = loc.db.cost_increase_per_expansion or 0 new_room.db.permitted_builders = {assets.id: cost + cost_increase} new_room.db.x_coord = loc.db.x_coord new_room.db.y_coord = loc.db.y_coord my_expansions = expansions.get(assets.id, 0) + 1 expansions[assets.id] = my_expansions loc.db.expansions = expansions new_room.name = new_room.name # this will setup .db.colored_name and strip ansi from key if cost_increase and assets.id in permits: permits[assets.id] += cost_increase loc.db.permitted_builders = permits class CmdManageRoom(ArxCommand): """ +manageroom Usage: +manageroom +manageroom/name <name> +manageroom/desc <description> +manageroom/springdesc <description> +manageroom/summerdesc <description> +manageroom/falldesc <description> +manageroom/winterdesc <description> +manageroom/exitname <exit>=<new name> +manageroom/addhome <owner> +manageroom/confirmhome <owner> +manageroom/rmhome <owner> +manageroom/addshop <owner> +manageroom/confirmshop <owner> +manageroom/rmshop <owner> +manageroom/toggleprivate +manageroom/setbarracks +manageroom/addbouncer <character> +manageroom/rmbouncer <character> +manageroom/adddecorator <character> +manageroom/rmdecorator <character> +manageroom/ban <character> +manageroom/unban <character> +manageroom/boot <character>=<exit> Flags your current room as permitting characters to build there. Cost is 100 economic resources unless specified otherwise. To set a seasonal description for your room, use /springdesc, /summerdesc, etc. /desc will always be shown as a fallback otherwise. You can also embed special time markers in your room description, like this: ``` <night>In the darkness, the forest looks foreboding.</night>. <morning>Birds are chirping and whatnot.</morning> <afternoon>Birds are no longer chirping.</morning> <evening>THEY WILL NEVER CHIRP AGAIN.</evening> ``` Text marked this way will only display when the server is truly at the given timeslot. The available times are night, morning, afternoon and evening. Note that `@detail`, seasons and time-of-day slots only work on rooms in this version of the `@desc` command. 
Owners can appoint characters to be decorators or bouncers, to allow them to use commands while not owners. The ban switch prevents characters from being able to enter the room. The boot switch removes characters from the room. Bouncers are able to use ban and boot. Decorators are permitted to use the desc switches. """ key = "+manageroom" locks = "cmd:all()" help_category = "Home" desc_switches = ("desc", "winterdesc", "springdesc", "summerdesc", "falldesc") bouncer_switches = ("ban", "unban", "boot") personnel_switches = ("addbouncer", "rmbouncer", "adddecorator", "rmdecorator") help_entry_tags = ["housing"] def check_perms(self): """Checks the permissions for the room""" caller = self.caller loc = caller.location if not self.switches or set(self.switches) & set(self.bouncer_switches): if caller in loc.bouncers: return True if not self.switches or set(self.switches) & set(self.desc_switches): if caller in loc.decorators: return True try: owner = AssetOwner.objects.get(id=loc.db.room_owner) except AssetOwner.DoesNotExist: caller.msg("No owner is defined here.") return org = owner.organization_owner if not org and not (owner == caller.player_ob.Dominion.assets or ('confirmhome' in self.switches or 'confirmshop' in self.switches)): caller.msg("You are not the owner here.") return if org and not (org.access(caller, 'build') or ('confirmhome' in self.switches or 'confirmshop' in self.switches)): caller.msg("You do not have permission to build here.") return return True def func(self): """Execute command.""" caller = self.caller loc = caller.location if not self.check_perms(): return if not self.switches: # display who has a home here, who has a shop here owners = loc.db.owners or [] caller.msg("{wHome Owners:{n %s" % ", ".join(str(ob) for ob in owners)) shops = loc.db.shopowner caller.msg("{wShop Owners:{n %s" % shops) self.msg("{wBouncers:{n %s" % ", ".join(str(ob) for ob in loc.bouncers)) self.msg("{wDecorators:{n %s" % ", ".join(str(ob) for ob in loc.decorators)) self.msg("{wBanned:{n %s" % ", ".join(str(ob) for ob in loc.banlist)) return if "name" in self.switches: loc.name = self.args or loc.name caller.msg("Room name changed to %s." % loc) return if "exitname" in self.switches: if not self.rhs: caller.msg("Invalid usage.") return rhslist = self.rhs.split(";") rhs = rhslist[0] aliases = rhslist[1:] exit_object = caller.search(self.lhs) if not exit_object: return old = str(exit_object) if exit_object.typeclass_path != settings.BASE_EXIT_TYPECLASS: caller.msg("That is not an exit.") return exit_object.name = rhs exit_object.save() exit_object.aliases.clear() for alias in aliases: exit_object.aliases.add(alias) if exit_object.destination: exit_object.flush_from_cache() caller.msg("%s changed to %s." % (old, exit_object)) return if (set(self.switches) & set(self.personnel_switches)) or (set(self.switches) & set(self.bouncer_switches)): targ = self.caller.player.search(self.lhs) if not targ: return targ = targ.char_ob if "addbouncer" in self.switches: loc.add_bouncer(targ) self.msg("%s is now a bouncer." % targ) return if "rmbouncer" in self.switches: loc.remove_bouncer(targ) self.msg("%s is no longer a bouncer." % targ) return if "adddecorator" in self.switches: loc.add_decorator(targ) self.msg("%s is now a decorator." % targ) return if "rmdecorator" in self.switches: loc.remove_decorator(targ) self.msg("%s is no longer a decorator." % targ) return if "unban" in self.switches: loc.unban_character(targ) self.msg("%s is no longer banned from entering." 
% targ) return if "ban" in self.switches: loc.ban_character(targ) self.msg("%s is now prevented from entering." % targ) return if "boot" in self.switches: from typeclasses.exits import Exit exit_obj = self.caller.search(self.rhs, typeclass=Exit) if not exit_obj: return if not exit_obj.can_traverse(targ): self.msg("They cannot move through that exit.") return if targ.location != self.caller.location: self.msg("They aren't here.") return exit_obj.at_traverse(targ, exit_obj.destination) self.msg("You have kicked out %s." % targ) targ.msg("You have been kicked out by %s." % self.caller) return try: owner = AssetOwner.objects.get(id=loc.db.room_owner) except AssetOwner.DoesNotExist: caller.msg("No owner is defined here.") return if set(self.switches) & set(self.desc_switches): if "player_made_room" not in loc.tags.all(): self.msg("You cannot change the description to a room that was made by a GM.") return if loc.desc: cost = loc.db.desc_cost or DESC_COST else: cost = 0 if loc.ndb.confirm_desc_change != self.args: caller.msg("Your room's current %s is:" % self.switches[0]) if "desc" in self.switches: caller.msg(loc.desc) elif "springdesc" in self.switches: caller.msg(loc.db.spring_desc) elif "summerdesc" in self.switches: caller.msg(loc.db.summer_desc) elif "winterdesc" in self.switches: caller.msg(loc.db.winter_desc) elif "falldesc" in self.switches: caller.msg(loc.db.autumn_desc) caller.msg("{wCost of changing desc:{n %s economic resources" % cost) if self.args: caller.msg("New desc:") caller.msg(self.args) caller.msg("{wTo confirm this, use the command again.{n") caller.msg("{wChanging this desc will prompt you again for a confirmation.{n") loc.ndb.confirm_desc_change = self.args return if cost: if cost > owner.economic: caller.msg("It would cost %s to re-desc the room, and you have %s." % (cost, owner.economic)) return owner.economic -= cost owner.save() if "desc" in self.switches: loc.desc = self.args if not loc.db.raw_desc: loc.db.raw_desc = self.args if not loc.db.general_desc: loc.db.general_desc = self.args elif "winterdesc" in self.switches: loc.db.winter_desc = self.args elif "summerdesc" in self.switches: loc.db.summer_desc = self.args elif "springdesc" in self.switches: loc.db.spring_desc = self.args elif "falldesc" in self.switches: loc.db.autumn_desc = self.args loc.ndb.confirm_desc_change = None # force raw_desc to update and parse our descs loc.ndb.last_season = None loc.ndb.last_timeslot = None caller.msg("%s changed to:" % self.switches[0]) caller.msg(self.args) return if "confirmhome" in self.switches: if caller.db.homeproposal != loc: caller.msg("You don't have an active invitation to accept here. Have them reissue it.") return caller.attributes.remove("homeproposal") loc.setup_home(caller) caller.msg("You have set up your home here.") return if "confirmshop" in self.switches: if caller.db.shopproposal != loc: caller.msg("You don't have an active invitation to accept here. Have them reissue it.") return caller.attributes.remove("shopproposal") loc.setup_shop(caller) caller.msg("You have set up a shop here.") return if "toggleprivate" in self.switches: if "private" in loc.tags.all(): loc.tags.remove("private") caller.msg("Room no longer private.") return loc.tags.add("private") caller.msg("Room is now private.") return if "setbarracks" in self.switches: tagname = str(owner) + "_barracks" other_barracks = ObjectDB.objects.filter(db_tags__db_key=tagname) for obj in other_barracks: obj.tags.remove(tagname) loc.tags.add(tagname) self.msg("%s set to %s's barracks." 
% (loc, owner)) return player = caller.player.search(self.args) if not player: return char = player.char_ob if not char: caller.msg("No char.") return if "addhome" in self.switches or "addshop" in self.switches: noun = "home" if "addhome" in self.switches else "shop" if noun == "home": char.db.homeproposal = loc else: char.db.shopproposal = loc if loc.db.shopowner: caller.msg("You must shut down the current shop here before adding another.") return msg = "%s has offered you a %s. To accept it, go to %s" % (caller, noun, loc.key) msg += " and use {w+manageroom/confirm%s{n." % noun player.send_or_queue_msg(msg) caller.msg("You have offered %s this room as a %s." % (char, noun)) return if "rmhome" in self.switches: loc.remove_homeowner(char) player.send_or_queue_msg("Your home at %s has been removed." % loc) return if "rmshop" in self.switches: loc.del_shop() player.send_or_queue_msg("Your shop at %s has been removed." % loc) return class CmdManageShop(ArxCommand): """ +manageshop Usage: +manageshop +manageshop/sellitem <object>=<price> +manageshop/rmitem <object id> +manageshop/all <markup percentage> +manageshop/refinecost <percentage> +manageshop/addrecipe <recipe name>=<markup percentage> +manageshop/rmrecipe <recipe name> +manageshop/addblacklist <player or org name> +manageshop/rmblacklist <player or org name> +manageshop/orgdiscount <org name>=<percentage> +manageshop/chardiscount <character>=<percentage> +manageshop/adddesign <key>=<code> +manageshop/rmdesign <key> Sets prices for your shop. Note that if you use 'all', that will be used for any recipe you don't explicitly set a price for. """ key = "+manageshop" locks = "cmd:all()" help_category = "Home" help_entry_tags = ["shops"] def list_prices(self): """Lists a table of prices for the shop owner""" loc = self.caller.location prices = loc.db.crafting_prices or {} msg = "{wCrafting Prices{n\n" table = PrettyTable(["{wName{n", "{wPrice Markup Percentage{n"]) for price in prices: if price == "removed": continue if price == "all" or price == "refine": name = price else: name = (CraftingRecipe.objects.get(id=price)).name table.add_row([name, "%s%%" % prices[price]]) msg += str(table) msg += "\n{wItem Prices{n\n" table = EvTable("{wID{n", "{wName{n", "{wPrice{n", width=78, border="cells") prices = loc.db.item_prices or {} for price in prices: obj = ObjectDB.objects.get(id=price) table.add_row(price, str(obj), prices[price]) msg += str(table) return msg def list_designs(self): """Lists designs the shop owner has created for crafting templates""" designs = self.caller.location.db.template_designs or {} self.msg("{wTemplate designs:{n %s" % ", ".join(designs.keys())) def func(self): """Execute command.""" caller = self.caller loc = caller.location if caller != loc.db.shopowner: caller.msg("You are not the shop's owner.") return if not self.args: caller.msg(self.list_prices()) org_discounts = (loc.db.discounts or {}).items() char_discounts = (loc.db.char_discounts or {}).items() # replace char with char.key in char_discounts list char_discounts = [(ob[0].key, ob[1]) for ob in char_discounts] discounts = ", ".join(("%s: %s%%" % (ob, val) for ob, val in (org_discounts + char_discounts))) caller.msg("{wDiscounts{n: %s" % discounts) blacklist = [] if loc.db.blacklist: # if ob doesn't have a key, it becomes a string (because corporations aren't ppl) blacklist = [getattr(ob, 'key', str(ob)) for ob in loc.db.blacklist] caller.msg("{wBlacklist{n: %s" % ", ".join(blacklist)) self.list_designs() return if "sellitem" in self.switches: try: price = 
int(self.rhs) if price < 0: raise ValueError except (TypeError, ValueError): caller.msg("Price must be a positive number.") return results = caller.search(self.lhs, location=caller, quiet=True) obj = AT_SEARCH_RESULT(results, caller, self.lhs, False, nofound_string="You don't carry %s." % self.lhs, multimatch_string="You carry more than one %s:" % self.lhs) if not obj: return obj.at_drop(caller) obj.location = None loc.db.item_prices[obj.id] = price obj.tags.add("for_sale") obj.db.sale_location = loc caller.msg("You put %s for sale for %s silver." % (obj, price)) return if "rmitem" in self.switches: try: num = int(self.args) if num not in loc.db.item_prices: caller.msg("No item by that ID being sold.") return obj = ObjectDB.objects.get(id=num) except ObjectDB.DoesNotExist: caller.msg("No object by that ID exists.") return except (ValueError, TypeError): caller.msg("You have to specify the ID # of an item you're trying to remove.") return obj.move_to(caller) obj.tags.remove("for_sale") obj.attributes.remove("sale_location") del loc.db.item_prices[obj.id] caller.msg("You have removed %s from your sale list." % obj) return if "all" in self.switches or "refinecost" in self.switches: try: cost = int(self.args) if cost < 0: raise ValueError except ValueError: caller.msg("Cost must be a non-negative number.") return if "all" in self.switches: loc.db.crafting_prices['all'] = cost caller.msg("Cost for non-specified recipes set to %s percent markup." % cost) else: loc.db.crafting_prices['refine'] = cost caller.msg("Cost for refining set to %s percent markup." % cost) return if "addrecipe" in self.switches: prices = loc.db.crafting_prices or {} try: recipe = caller.player_ob.Dominion.assets.recipes.get(name__iexact=self.lhs) cost = int(self.rhs) if cost < 0: raise ValueError except (TypeError, ValueError): caller.msg("Cost must be a positive number.") return except (CraftingRecipe.DoesNotExist, CraftingRecipe.MultipleObjectsReturned): caller.msg("Could not retrieve a recipe by that name.") return prices[recipe.id] = cost caller.msg("Price for %s set to %s." % (recipe.name, cost)) removedlist = prices.get("removed", []) if recipe.id in removedlist: removedlist.remove(recipe.id) prices['removed'] = removedlist loc.db.crafting_prices = prices return if "rmrecipe" in self.switches: arg = None prices = loc.db.crafting_prices or {} try: recipe = None if self.lhs.lower() == "all": arg = "all" elif self.lhs.lower() == "refining": arg = "refining" else: recipe = caller.player_ob.Dominion.assets.recipes.get(name__iexact=self.lhs) arg = recipe.id del prices[arg] caller.msg("Price for %s has been removed." 
% recipe.name if recipe else arg) except KeyError: removedlist = prices.get("removed", []) if arg in removedlist: caller.msg("You had no price listed for that recipe.") else: try: removedlist.append(int(arg)) prices["removed"] = removedlist except ValueError: caller.msg("Must be an ID.") except CraftingRecipe.DoesNotExist: caller.msg("No recipe found by that name.") finally: loc.db.crafting_prices = prices return if "adddesign" in self.switches: designs = loc.db.template_designs or {} try: if not self.rhs: self.msg("Design for %s: %s" % (self.lhs, designs[self.lhs])) return except KeyError: self.list_designs() return designs[self.lhs] = self.rhs self.msg("Raw Design for %s is now: %s" % (self.lhs, raw(self.rhs))) self.msg("Design for %s appears as: %s" % (self.lhs, self.rhs)) loc.db.template_designs = designs return if "rmdesign" in self.switches: designs = loc.db.template_designs or {} try: del designs[self.lhs] self.msg("Design deleted.") except KeyError: self.msg("No design by that name.") self.list_designs() loc.db.template_designs = designs return if "addblacklist" in self.switches or "rmblacklist" in self.switches: blacklist = loc.db.blacklist or [] try: targ = caller.player.search(self.args, nofound_string="No player by that name. Checking organizations.") org = False if not targ: org = True targ = Organization.objects.get(name__iexact=self.args) else: targ = targ.char_ob if "addblacklist" in self.switches: if org: if targ.name in blacklist: caller.msg("They are already in the blacklist.") return blacklist.append(targ.name) else: if targ in blacklist: caller.msg("They are already in the blacklist.") return blacklist.append(targ) caller.msg("%s added to blacklist." % getattr(targ, 'key', targ)) else: if org: if targ.name not in blacklist: caller.msg("They are not in the blacklist.") return blacklist.remove(targ.name) else: if targ not in blacklist: caller.msg("They are not in the blacklist.") return blacklist.remove(targ) caller.msg("%s removed from blacklist." % getattr(targ, 'key', targ)) except Organization.DoesNotExist: caller.msg("No valid target found by that name.") loc.db.blacklist = blacklist return if "orgdiscount" in self.switches: try: org = Organization.objects.get(name__iexact=self.lhs) discount = int(self.rhs) if discount > 100: raise ValueError if discount == 0: loc.db.discounts.pop(org.name, 0) self.msg("Removed discount for %s." % org) return loc.db.discounts[org.name] = discount caller.msg("%s given a discount of %s percent." % (org, discount)) return except (TypeError, ValueError): caller.msg("Discount must be a number, max of 100.") return except Organization.DoesNotExist: caller.msg("No organization by that name found.") return if "chardiscount" in self.switches: if loc.db.char_discounts is None: loc.db.char_discounts = {} try: character = Character.objects.get(db_key__iexact=self.lhs) discount = int(self.rhs) if discount > 100: raise ValueError if discount == 0: loc.db.char_discounts.pop(character, 0) self.msg("Removed discount for %s." % character.key) return loc.db.char_discounts[character] = discount caller.msg("%s given a discount of %s percent." 
% (character.key, discount)) return except (TypeError, ValueError): caller.msg("Discount must be a number, max of 100.") return except Character.DoesNotExist: caller.msg("No character found by that name.") return caller.msg("Invalid switch.") class CmdBuyFromShop(CmdCraft): """ +shop Usage: +shop +shop/filter <word in item name> +shop/buy <item number> +shop/look <item number> +shop/viewdesigns [<key>] +shop/name <name> +shop/desc <description> +shop/altdesc <description> +shop/adorn <material type>=<amount> +shop/translated_text <language>=<text> +shop/finish [<additional silver to invest>,<AP to invest>] +shop/abandon +shop/changename <object>=<new name> +shop/refine <object>[=<additional silver to spend>,AP to spend>] +shop/addadorn <object>=<material type>,<amount> +shop/craft Allows you to buy objects from a shop. +shop/craft allows you to use a crafter's skill to create an item. Similarly, +shop/refine lets you use a crafter's skill to attempt to improve a crafted object. Check 'help craft' for an explanation of switches, all of which can be used with +shop. Costs and materials are covered by you. +shop/viewdesigns lets you see the crafter's pre-made descriptions that you can copy for items you create. """ key = "+shop" aliases = ["@shop", "shop"] locks = "cmd:all()" help_category = "Home" def get_discount(self): """Returns our percentage discount""" loc = self.caller.location char_discounts = loc.db.char_discounts or {} discount = 0.0 discounts = loc.db.discounts or {} if self.caller in char_discounts: return char_discounts[self.caller] for org in self.caller.player_ob.Dominion.current_orgs: odiscount = discounts.get(org.name, 0.0) if odiscount and not discount: discount = odiscount if odiscount and discount and odiscount > discount: discount = odiscount return discount def get_refine_price(self, base): """Price of refining""" loc = self.caller.location price = 0 prices = loc.db.crafting_prices or {} if "refine" in prices: price = (base * prices["refine"]) / 100.0 elif "all" in prices: price = (base * prices["all"]) / 100.0 if price == 0: return price if price > 0: price -= (price * self.get_discount() / 100.0) if price < 0: return 0 return price raise ValueError def get_recipe_price(self, recipe): """Price for crafting a recipe""" loc = self.caller.location base = recipe.value price = 0 crafting_prices = loc.db.crafting_prices or {} if recipe.id in crafting_prices: price = (base * crafting_prices[recipe.id]) / 100.0 elif "all" in crafting_prices: price = (base * crafting_prices["all"]) / 100.0 if price is not None: price -= (price * self.get_discount() / 100.0) if price < 0: return 0 return price # no price defined raise ValueError def list_prices(self): """List prices of everything""" loc = self.caller.location prices = loc.db.crafting_prices or {} msg = "{wCrafting Prices{n\n" table = PrettyTable(["{wName{n", "{wCraft Price{n", "{wRefine Price{n"]) recipes = loc.db.shopowner.player_ob.Dominion.assets.recipes.all().order_by('name') # This try/except block corrects 'removed' lists that are corrupted by # non-integers, because that was a thing once upon a time. 
try: removed = prices.get("removed", []) recipes = recipes.exclude(id__in=removed) except ValueError: removed = [ob for ob in removed if isinstance(ob, int)] prices['removed'] = removed recipes = recipes.exclude(id__in=removed) recipes = self.filter_shop_qs(recipes, "name") for recipe in recipes: try: refineprice = str(self.get_refine_price(recipe.value)) table.add_row([recipe.name, str(recipe.additional_cost + self.get_recipe_price(recipe)), refineprice]) except (ValueError, TypeError): self.msg("{rError: Recipe %s does not have a price defined.{n" % recipe.name) if recipes: msg += str(table) msg += "\n{wItem Prices{n\n" table = EvTable("{wID{n", "{wName{n", "{wPrice{n", width=78, border="cells") prices = loc.db.item_prices or {} sale_items = ObjectDB.objects.filter(id__in=prices.keys()) sale_items = self.filter_shop_qs(sale_items, "db_key") for item in sale_items: price = prices[item.id] price -= (price * self.get_discount() / 100.0) table.add_row(item.id, item.name, price) if sale_items: msg += str(table) designs = self.filter_shop_dict(loc.db.template_designs or {}) if designs: msg += "\n{wNames of designs:{n %s" % ", ".join(designs.keys()) if not recipes and not sale_items and not designs: msg = "Nothing found." return msg def filter_shop_qs(self, shop_qs, field_name): """Returns filtered queryset if a filter word exists""" if "filter" in self.switches and self.args: filter_query = {"%s__icontains" % field_name: self.args} shop_qs = shop_qs.filter(**filter_query) return shop_qs def filter_shop_dict(self, shop_dict): """Returns filtered dict if a filter word exists""" if "filter" in self.switches and self.args: shop_dict = {name: value for name, value in shop_dict.items() if self.args.lower() in name.lower()} return shop_dict def pay_owner(self, price, msg): """Pay money to the other and send an inform of the sale""" loc = self.caller.location loc.db.shopowner.pay_money(-price) assets = loc.db.shopowner.player_ob.assets if price >= assets.min_silver_for_inform: assets.inform(msg, category="shop", append=True) def buy_item(self, item): """Buy an item from inventory - pay the owner and get the item""" loc = self.caller.location price = loc.db.item_prices[item.id] price -= price * (self.get_discount() / 100.0) self.caller.pay_money(price) self.pay_owner(price, "%s has bought %s for %s." % (self.caller, item, price)) self.caller.msg("You paid %s for %s." % (price, item)) item.move_to(self.caller) item.tags.remove("for_sale") item.attributes.remove("sale_location") del loc.db.item_prices[item.id] if hasattr(item, "rmkey"): if item.rmkey(loc.db.shopowner): item.grantkey(self.caller) self.caller.msg("Good deal! The owner gave you a key for %s." % item) return self.caller.msg("Shady deal? The owner didn't have a key for %s to give you." 
% item) def check_blacklist(self): """See if we're allowed to buy""" caller = self.caller loc = caller.location blacklist = loc.db.blacklist or [] if caller in blacklist: return True for org in caller.player_ob.Dominion.current_orgs: if org.name in blacklist: return True return False def func(self): """Execute command.""" caller = self.caller loc = caller.location self.crafter = loc.db.shopowner if not self.crafter: self.msg("No shop owner is defined.") return if self.check_blacklist(): caller.msg("You are not permitted to buy from this shop.") return if self.crafter.roster.roster.name == "Gone": self.msg("The shop owner is dead.") return if "filter" in self.switches or (not self.switches and not self.args): caller.msg(self.list_prices()) project = caller.db.crafting_project if project: caller.msg(self.display_project(project)) return if "viewdesigns" in self.switches: designs = loc.db.template_designs or {} if not self.args: self.msg("Names of designs: %s" % ", ".join(designs.keys())) return try: design = designs[self.args] self.msg("{wDesign's appearance:{n\n%s" % design) self.msg("\n{wRaw code of design:{n\n%s" % raw(design)) except KeyError: self.msg("No design found by that name.") self.msg("Names of designs: %s" % ", ".join(designs.keys())) return if "buy" in self.switches: try: num = int(self.args) price = loc.db.item_prices[num] obj = ObjectDB.objects.get(id=num) except (TypeError, ValueError, KeyError): caller.msg("You must supply the ID number of an item being sold.") return if price > caller.db.currency: caller.msg("You cannot afford it.") return self.buy_item(obj) return if "look" in self.switches: try: num = int(self.args) obj = ObjectDB.objects.get(id=num, id__in=loc.db.item_prices.keys()) except (TypeError, ValueError): self.msg("Please provide a number of an item.") return except ObjectDB.DoesNotExist: caller.msg("No item found by that number.") return caller.msg(obj.return_appearance(caller)) return if set(self.switches) & set(self.crafting_switches + ("craft",)): return CmdCraft.func(self) caller.msg("Invalid switch.") class ShopCmdSet(CmdSet): """CmdSet for shop spaces.""" key = "ShopCmdSet" priority = 101 duplicates = False no_exits = False no_objs = False def at_cmdset_creation(self): """ This is the only method defined in a cmdset, called during its creation. It should populate the set with command instances. Note that it can also take other cmdsets as arguments, which will be used by the character default cmdset to add all of these onto the internal cmdset stack. They will then be able to removed or replaced as needed. """ self.add(CmdManageShop()) self.add(CmdBuyFromShop())
py
1a49b3549a156a35ee853407d317e1d375310132
from django.conf import settings
from django.db import models

from drf_admin.utils.models import BaseModel


# Create your models here.
class ErrorLogs(BaseModel):
    """Error logs"""
    username = models.CharField(max_length=32, verbose_name='用户')
    view = models.CharField(max_length=32, verbose_name='视图')
    desc = models.TextField(verbose_name='描述')
    ip = models.GenericIPAddressField(verbose_name='IP')
    detail = models.TextField(verbose_name='详情')
    objects = models.Manager()

    class Meta:
        db_table = 'monitor_errorlogs'
        verbose_name = '错误日志'
        verbose_name_plural = verbose_name
        ordering = ['-id']


class IpBlackList(BaseModel):
    """IP blacklist"""
    ip = models.GenericIPAddressField(unique=True, verbose_name='IP')
    objects = models.Manager()

    class Meta:
        db_table = 'monitor_ipblacklist'
        verbose_name = 'IP黑名单'
        verbose_name_plural = verbose_name
        ordering = ['-id']


class OnlineUsers(BaseModel):
    """Online users"""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name='用户')
    ip = models.GenericIPAddressField(verbose_name='IP')
    objects = models.Manager()

    class Meta:
        db_table = 'monitor_onlineusers'
        verbose_name = '在线用户'
        verbose_name_plural = verbose_name
        ordering = ['-id']
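A brief usage sketch for these monitoring models follows. It assumes a configured Django project in which the app is importable as "monitor"; the import path, helper names, and request-handling context are assumptions, not part of the file above.

from monitor.models import ErrorLogs, IpBlackList

def log_view_error(request, exc):
    # Persist enough context to inspect the failure later.
    ErrorLogs.objects.create(
        username=getattr(request.user, 'username', '') or 'anonymous',
        view=request.resolver_match.view_name if request.resolver_match else '',
        desc=str(exc),
        ip=request.META.get('REMOTE_ADDR', '0.0.0.0'),
        detail=repr(exc),
    )

def ip_is_blacklisted(ip):
    # Could be called from a middleware to reject banned addresses.
    return IpBlackList.objects.filter(ip=ip).exists()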
py
1a49b3e166381beda5dea5417a5b61c273e2ed07
"""Posts views."""

# Django
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView, CreateView

# Form
from posts.forms import PostForm

# Models
from posts.models import Post


class PostsFeedView(LoginRequiredMixin, ListView):
    """Return all published posts."""

    template_name = 'posts/feed.html'
    model = Post
    ordering = ('-created',)
    paginate_by = 30
    context_object_name = 'posts'


class PostDetailView(LoginRequiredMixin, DetailView):
    """Return post detail."""

    template_name = 'posts/detail.html'
    queryset = Post.objects.all()
    context_object_name = 'post'


class CreatePostView(LoginRequiredMixin, CreateView):
    """Create a new post."""

    template_name = 'posts/new.html'
    form_class = PostForm
    success_url = reverse_lazy('posts:feed')

    def get_context_data(self, **kwargs):
        """Add user and profile to context."""
        context = super().get_context_data(**kwargs)
        context['user'] = self.request.user
        context['profile'] = self.request.user.profile
        return context
py
1a49b4737e387f8b07be768759359c2b8087ca11
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hopeland_33964.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
py
1a49b4bd4b6ce28068501caeb7cde461f3f44048
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2009 Nicolas Rougier, Matthieu Kluj, Jessy Cyganczuk
# Copyright (c) 2015 James Gaston.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of pyglet nor the names of its
#    contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import pyglet
from pyglet.gl import *
from PyWidget3 import *

window = pyglet.window.Window(resizable=True)
slider = Slider(x=50, y=50)
window.push_handlers(slider)


@window.event
def on_draw():
    window.clear()
    slider.on_draw()


@slider.event
def on_value_change(slider):
    print(slider.value)


pyglet.app.run()
py
1a49b53e8b0065d2320960f162774ce4c99ddf91
# # Copyright 2013 eNovance # # Author: Julien Danjou <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/udp.py """ import datetime import mock import msgpack from ceilometer.openstack.common.fixture import config from ceilometer.openstack.common import network_utils from ceilometer.openstack.common import test from ceilometer.publisher import udp from ceilometer.publisher import utils from ceilometer import sample COUNTER_SOURCE = 'testsource' class TestUDPPublisher(test.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), ] def _make_fake_socket(self, published): def _fake_socket_socket(family, type): def record_data(msg, dest): published.append((msg, dest)) udp_socket = mock.Mock() udp_socket.sendto = record_data return udp_socket return _fake_socket_socket def setUp(self): super(TestUDPPublisher, self).setUp() self.CONF = self.useFixture(config.Config()).conf self.CONF.publisher.metering_secret = 'not-so-secret' def test_published(self): self.data_sent = [] with mock.patch('socket.socket', self._make_fake_socket(self.data_sent)): publisher = udp.UDPPublisher( network_utils.urlsplit('udp://somehost')) publisher.publish_samples(None, self.test_data) self.assertEqual(5, len(self.data_sent)) sent_counters = [] for data, dest in self.data_sent: counter = msgpack.loads(data) sent_counters.append(counter) # Check destination self.assertEqual(('somehost', self.CONF.collector.udp_port), dest) # Check that counters are equal self.assertEqual(sorted( [utils.meter_message_from_counter(d, "not-so-secret") for d in self.test_data]), sorted(sent_counters)) @staticmethod def _raise_ioerror(*args): raise IOError def _make_broken_socket(self, family, type): udp_socket = mock.Mock() udp_socket.sendto = 
self._raise_ioerror return udp_socket def test_publish_error(self): with mock.patch('socket.socket', self._make_broken_socket): publisher = udp.UDPPublisher( network_utils.urlsplit('udp://localhost')) publisher.publish_samples(None, self.test_data)
py
1a49b56a214a8ea381e453d6f684835552ecaa08
import os
import sys
import configparser

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import metrics

from dataLoader import *
from ModelUtil import *

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[0], True)

config = configparser.ConfigParser()
config.read('target_model_config.ini')

DATA_NAME = sys.argv[1] if len(sys.argv) > 1 else "CIFAR"
MODEL = sys.argv[2] if len(sys.argv) > 2 else "ResNet50"
EPOCHS = int(config['{}_{}'.format(DATA_NAME, MODEL)]['EPOCHS'])
BATCH_SIZE = 64
LEARNING_RATE = float(config['{}_{}'.format(DATA_NAME, MODEL)]['LEARNING_RATE'])
WEIGHTS_PATH = "weights/Target/{}_{}.hdf5".format(DATA_NAME, MODEL)

(x_train, y_train), (x_test, y_test), _ = globals()['load_' + DATA_NAME]('TargetModel')

np.random.seed(1)
tf.random.set_seed(1)


def train(model, x_train, y_train):
    """
    Train the target model and save its weights to WEIGHTS_PATH.
    :param model: the model that will be trained
    :param x_train: the images as a numpy array
    :param y_train: the labels for x_train
    :return: None
    """
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(lr=5e-5),
                  metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
    model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save(WEIGHTS_PATH)


def evaluate(x_test, y_test):
    model = keras.models.load_model(WEIGHTS_PATH)
    model.compile(loss='categorical_crossentropy',
                  metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
    loss, accuracy, precision, recall = model.evaluate(x_test, y_test, verbose=1)
    F1_Score = 2 * (precision * recall) / (precision + recall)
    print('loss:%.4f accuracy:%.4f precision:%.4f recall:%.4f F1_Score:%.4f'
          % (loss, accuracy, precision, recall, F1_Score))


TargetModel = globals()['create_{}_model'.format(MODEL)](x_train.shape[1:], y_train.shape[1])
train(TargetModel, x_train, y_train)
evaluate(x_train, y_train)
evaluate(x_test, y_test)
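As a quick sanity check after training, the saved target model can be reloaded for inference. This sketch reuses WEIGHTS_PATH, x_test, and y_test from the script above and relies only on standard Keras calls; it is an illustrative follow-up, not part of the original script.

import numpy as np
from tensorflow import keras

# Reload the exact model that train() wrote to disk, e.g. weights/Target/CIFAR_ResNet50.hdf5.
target = keras.models.load_model(WEIGHTS_PATH)

# Predict on a small batch of held-out images and compare against the one-hot labels.
probs = target.predict(x_test[:8], batch_size=8)
print("predicted:", np.argmax(probs, axis=1))
print("actual:   ", np.argmax(y_test[:8], axis=1))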
py
1a49b635ca8a972217537b622cf98e27d6f06dce
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .proxy_resource import ProxyResource


class ServerDnsAlias(ProxyResource):
    """A server DNS alias.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar azure_dns_record: The fully qualified DNS record for alias
    :vartype azure_dns_record: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'azure_dns_record': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'azure_dns_record': {'key': 'properties.azureDnsRecord', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(ServerDnsAlias, self).__init__(**kwargs)
        self.azure_dns_record = None
py
1a49b6c2094a8fdae619bcece846473470c33616
from rainworms import *
import random


class Bot():
    def __init__(self):
        self.game: RainWorms = None

    @staticmethod
    def in_roll_take_phase(possible_actions: List[PlayerAction]) -> bool:
        return any([
            action for action in possible_actions
            if action.action_type == PlayerActionType.ROLL_DICE
        ])

    @staticmethod
    def in_take_steal_phase(possible_actions: List[PlayerAction]) -> bool:
        return any([
            action for action in possible_actions
            if action.action_type == PlayerActionType.TAKE_STONE_WITH_VALUE
            or action.action_type == PlayerActionType.STEAL_STONE_WITH_VALUE
        ])

    @staticmethod
    def in_pick_dice_phase(possible_actions: List[PlayerAction]) -> bool:
        return any([
            action for action in possible_actions
            if action.action_type == PlayerActionType.PICK_DICE_SET_WITH_FACE
        ])

    def select_action(self, player: Player, possible_actions: List[PlayerAction]):
        raise NotImplementedError

    def game_loop(self, game, player: Player, turn):
        self.game = game
        possible_actions = next(turn)
        while 1:
            action = self.select_action(player, possible_actions)
            try:
                possible_actions = turn.send(action)
            except StopIteration:
                break


class RandomBot(Bot):
    def __init__(self):
        super(RandomBot, self).__init__()
        self.name = f"RandomBot"

    def select_action(self, player: Player, possible_actions: List[PlayerAction]):
        return random.choice(possible_actions)


class GreedyBot(Bot):
    """
    This bot tries to take the highest scoring set of dice every time, and will
    always take the highest stone it can. It will start looking to take stones
    after a set threshold number of rolls.
    """

    def __init__(self, take_stone_threshold):
        super(GreedyBot, self).__init__()
        self.name = f"GreedyBot_{take_stone_threshold}"
        self.take_stone_threshold = take_stone_threshold

    @staticmethod
    def key_dice_set_actions(action: PlayerAction) -> int:
        number = action.argument.face.value
        is_worm = int(action.argument.face.name == "Worm")
        return number + is_worm

    def select_action(self, player: Player, possible_actions: List[PlayerAction]) -> PlayerAction:
        if Bot.in_roll_take_phase(possible_actions):
            # Pick `roll` until we reach a threshold, then take a stone if possible.
            if len(Utils.count_faces(player.selected_dice)) < self.take_stone_threshold:
                return PlayerAction(PlayerActionType.ROLL_DICE, None)
            if any([action for action in possible_actions
                    if action.action_type == PlayerActionType.TAKE_STONE]):
                return PlayerAction(PlayerActionType.TAKE_STONE, None)
            return PlayerAction(PlayerActionType.ROLL_DICE, None)
        elif Bot.in_pick_dice_phase(possible_actions):
            possible_actions = sorted(possible_actions, key=self.key_dice_set_actions)
            return possible_actions[-1]
        elif Bot.in_take_steal_phase(possible_actions):
            # Try to pick the highest stone, disregarding stealing or taking from the bank
            sorted_actions = sorted(possible_actions, key=lambda x: x.argument)
            best_stone_action = sorted_actions[-1]
            return best_stone_action
        else:
            # Fall back to a random choice
            return random.choice(possible_actions)


class GreedyStealingBot(GreedyBot):
    """
    This is GreedyBot, except it tries to take the highest stone that it can steal.
    """

    def __init__(self, take_stone_threshold):
        super(GreedyStealingBot, self).__init__(take_stone_threshold)
        self.name = f"GreedyStealingBot_{take_stone_threshold}"
        self.take_stone_threshold = take_stone_threshold

    @staticmethod
    def key_pick_stone(action: PlayerAction) -> int:
        score = action.argument
        # Add 20 points if this is a stealable stone.
        if action.action_type == PlayerActionType.STEAL_STONE_WITH_VALUE:
            score += 20
        return score
py
1a49b745dbc619b5a144a17b686a2a75e2845f69
# Copyright 2021 solo-learn development team. # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies # or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import os from pprint import pprint from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.callbacks import LearningRateMonitor from pytorch_lightning.loggers import WandbLogger from pytorch_lightning.plugins import DDPPlugin from solo.args.setup import parse_args_pretrain from solo.methods import METHODS from solo.utils.auto_resumer import AutoResumer try: from solo.methods.dali import PretrainABC except ImportError as e: print(e) _dali_avaliable = False else: _dali_avaliable = True try: from solo.utils.auto_umap import AutoUMAP except ImportError: _umap_available = False else: _umap_available = True from solo.utils.checkpointer import Checkpointer from solo.utils.classification_dataloader import prepare_data as prepare_data_classification from solo.utils.pretrain_dataloader import ( prepare_dataloader, prepare_datasets, prepare_n_crop_transform, prepare_transform, ) def main(): seed_everything(5) args = parse_args_pretrain() assert args.method in METHODS, f"Choose from {METHODS.keys()}" if args.num_large_crops != 2: assert args.method == "wmse" MethodClass = METHODS[args.method] if args.dali: assert ( _dali_avaliable ), "Dali is not currently avaiable, please install it first with [dali]." 
MethodClass = type(f"Dali{MethodClass.__name__}", (MethodClass, PretrainABC), {}) model = MethodClass(**args.__dict__) # contrastive dataloader if not args.dali: # asymmetric augmentations if args.unique_augs > 1: transform = [ prepare_transform(args.dataset, **kwargs) for kwargs in args.transform_kwargs ] else: transform = [prepare_transform(args.dataset, **args.transform_kwargs)] transform = prepare_n_crop_transform(transform, num_crops_per_aug=args.num_crops_per_aug) if args.debug_augmentations: print("Transforms:") pprint(transform) train_dataset = prepare_datasets( args.dataset, transform, data_dir=args.data_dir, train_dir=args.train_dir, no_labels=args.no_labels, ) train_loader = prepare_dataloader( train_dataset, batch_size=args.batch_size, num_workers=args.num_workers ) # normal dataloader for when it is available if args.dataset == "custom" and (args.no_labels or args.val_dir is None): val_loader = None elif args.dataset in ["imagenet100", "imagenet"] and args.val_dir is None: val_loader = None else: _, val_loader = prepare_data_classification( args.dataset, data_dir=args.data_dir, train_dir=args.train_dir, val_dir=args.val_dir, batch_size=args.batch_size, num_workers=args.num_workers, ) callbacks = [] # wandb logging if args.wandb: wandb_logger = WandbLogger( name=args.name, project=args.project, entity=args.entity, offline=args.offline, ) wandb_logger.watch(model, log="gradients", log_freq=100) wandb_logger.log_hyperparams(args) # lr logging lr_monitor = LearningRateMonitor(logging_interval="epoch") callbacks.append(lr_monitor) if args.save_checkpoint: # save checkpoint on last epoch only ckpt = Checkpointer( args, logdir=os.path.join(args.checkpoint_dir, args.method), frequency=args.checkpoint_frequency, ) callbacks.append(ckpt) if args.auto_umap: assert ( _umap_available ), "UMAP is not currently avaiable, please install it first with [umap]." auto_umap = AutoUMAP( args, logdir=os.path.join(args.auto_umap_dir, args.method), frequency=args.auto_umap_frequency, ) callbacks.append(auto_umap) if args.auto_resume and args.resume_from_checkpoint is None: auto_resumer = AutoResumer( checkpoint_dir=os.path.join(args.checkpoint_dir, args.method), max_hours=args.auto_resumer_max_hours, ) resume_from_checkpoint = auto_resumer.find_checkpoint(args) if resume_from_checkpoint is not None: print( "Resuming from previous checkpoint that matches specifications:", f"'{resume_from_checkpoint}'", ) args.resume_from_checkpoint = resume_from_checkpoint trainer = Trainer.from_argparse_args( args, logger=wandb_logger if args.wandb else None, callbacks=callbacks, plugins=DDPPlugin(find_unused_parameters=True) if args.accelerator == "ddp" else None, checkpoint_callback=False, terminate_on_nan=True, ) if args.dali: trainer.fit(model, val_dataloaders=val_loader) else: trainer.fit(model, train_loader, val_loader) if __name__ == "__main__": main()
py
1a49b8295e4ff8848045af28d5a56d463d5c3df3
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.utils import preprocessing class MockRandomGenerator: def __init__(self, value): self.value = value def random_uniform(self, shape, minval, maxval, dtype=None): del minval, maxval return tf.constant(self.value, dtype=dtype) class PreprocessingTestCase(tf.test.TestCase): def setUp(self): super().setUp() def test_transform_to_standard_range_neg_one_range(self): x = tf.constant([-1, 0, 1]) x = preprocessing.transform_value_range( x, original_range=[-1, 1], target_range=[0, 255] ) self.assertAllClose(x, [0.0, 127.5, 255.0]) def test_transform_to_same_range(self): x = tf.constant([-1, 0, 1]) x = preprocessing.transform_value_range( x, original_range=[0, 255], target_range=[0, 255] ) self.assertAllClose(x, [-1, 0, 1]) def test_transform_to_standard_range(self): x = tf.constant([8 / 255, 9 / 255, 255 / 255]) x = preprocessing.transform_value_range( x, original_range=[0, 1], target_range=[0, 255] ) self.assertAllClose(x, [8.0, 9.0, 255.0]) def test_transform_to_value_range(self): x = tf.constant([128.0, 255.0, 0.0]) x = preprocessing.transform_value_range( x, original_range=[0, 255], target_range=[0, 1] ) self.assertAllClose(x, [128 / 255, 1, 0]) def test_random_inversion(self): generator = MockRandomGenerator(0.75) self.assertEqual(preprocessing.random_inversion(generator), -1.0) generator = MockRandomGenerator(0.25) self.assertEqual(preprocessing.random_inversion(generator), 1.0)
py
1a49b838e021c7cbd4336f477109e127bfad7fcf
from __future__ import absolute_import import re import os import time import math import toolz import click import pprint import logging import inspect import warnings import itertools import functools import subprocess from jrnr._compat import exclusive_open FORMAT = '%(asctime)-15s %(message)s' logger = logging.getLogger('uploader') logger.setLevel('DEBUG') formatter = logging.Formatter(FORMAT) SLURM_SCRIPT = ''' #!/bin/bash # Job name: #SBATCH --job-name={jobname} # # Partition: #SBATCH --partition={partition} # # Account: #SBATCH --account=co_laika # # QoS: #SBATCH --qos=savio_lowprio # #SBATCH --nodes=1 # # Wall clock limit: #SBATCH --time=72:00:00 # #SBATCH --requeue {dependencies} {output} '''.strip() SLURM_MULTI_SCRIPT = SLURM_SCRIPT + ''' # #SBATCH --array=0-{maxnodes} # set up directories mkdir -p {logdir} mkdir -p locks ## Run command for i in {{1..{jobs_per_node}}} do nohup python {filepath} do_job --job_name {jobname} \ --job_id {uniqueid} --num_jobs {numjobs} --logdir "{logdir}" {flags} \ > {logdir}/nohup-{jobname}-{uniqueid}-${{SLURM_ARRAY_TASK_ID}}-$i.out & done python {filepath} wait --job_name {jobname} \ --job_id {uniqueid} --num_jobs {numjobs} {flags} ''' SLURM_SINGLE_SCRIPT = SLURM_SCRIPT + ''' ## Run command python {filepath} {flags} ''' def _product(values): ''' Examples -------- .. code-block:: python >>> _product([3, 4, 5]) 60 ''' return functools.reduce(lambda x, y: x*y, values, 1) def _unpack_job(specs): job = {} for spec in specs: job.update(spec) return job def generate_jobs(job_spec): for specs in itertools.product(*job_spec): yield _unpack_job(specs) def count_jobs(job_spec): return _product(map(len, job_spec)) def _prep_slurm( filepath, jobname='slurm_job', partition='savio2', job_spec=None, limit=None, uniqueid='"${SLURM_ARRAY_JOB_ID}"', jobs_per_node=24, maxnodes=100, dependencies=None, logdir='log', flags=None): depstr = '' if (dependencies is not None) and (len(dependencies) > 1): status, deps = dependencies if len(deps) > 0: depstr += ( '#\n#SBATCH --dependency={}:{}' .format(status, ','.join(map(str, deps)))) if flags: flagstr = ' '.join(map(str, flags)) else: flagstr = '' if job_spec: n = count_jobs(job_spec) if limit is not None: n = min(limit, n) numjobs = n output = ( '#\n#SBATCH --output {logdir}/slurm-{jobname}-%A_%a.out' .format(jobname=jobname, logdir=logdir)) template = SLURM_MULTI_SCRIPT else: numjobs = 1 output = ( '#\n#SBATCH --output {logdir}/slurm-{jobname}-%A.out' .format(jobname=jobname, logdir=logdir)) template = SLURM_SINGLE_SCRIPT with open('run-slurm.sh', 'w+') as f: f.write(template.format( jobname=jobname, partition=partition, numjobs=numjobs, jobs_per_node=jobs_per_node, maxnodes=(maxnodes-1), uniqueid=uniqueid, filepath=filepath.replace(os.sep, '/'), dependencies=depstr, flags=flagstr, logdir=logdir, output=output)) def run_slurm( filepath, jobname='slurm_job', partition='savio2', job_spec=None, limit=None, uniqueid='"${SLURM_ARRAY_JOB_ID}"', jobs_per_node=24, maxnodes=100, dependencies=None, logdir='log', flags=None): _prep_slurm( filepath=filepath, jobname=jobname, partition=partition, job_spec=job_spec, limit=limit, uniqueid=uniqueid, jobs_per_node=jobs_per_node, maxnodes=maxnodes, dependencies=dependencies, logdir=logdir, flags=flags) job_command = ['sbatch', 'run-slurm.sh'] proc = subprocess.Popen( job_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() matcher = re.search(r'^\s*Submitted batch job (?P<run_id>[0-9]+)\s*$', out) if matcher: run_id = int(matcher.group('run_id')) else: 
run_id = None if err: raise OSError('Error encountered submitting job: {}'.format(err)) return run_id def get_job_by_index(job_spec, index): ''' Examples -------- .. code-block:: python >>> job = get_job_by_index( ... ( ... [{'let': 'a'}, {'let': 'b'}, {'let': 'c'}], ... [{'num': 1}, {'num': 2}, {'num': 3}], ... [{'pitch': 'do'}, {'pitch': 'rey'}, {'pitch': 'mi'}]), ... 5) ... >>> sorted(zip(job.keys(), job.values())) # test job ordered [('let', 'a'), ('num', 2), ('pitch', 'mi')] >>> job = get_job_by_index( ... tuple(map( ... lambda x: [{x: i} for i in x], ... ['hi', 'hello', 'bye'])), ... 10) ... >>> sorted(zip(job.keys(), job.values())) # test job ordered [('bye', 'y'), ('hello', 'l'), ('hi', 'h')] ''' return _unpack_job([ job_spec[i][ (index//(_product(map(len, job_spec[i+1:]))) % len(job_spec[i]))] for i in range(len(job_spec))]) def _get_call_args(job_spec, index=0): ''' Places stringified job parameters into `metadata` dict along with job spec .. code-block:: python >>> job_spec = ( ... [{'ordinal': 1, 'zeroth': 0}, {'ordinal': 2, 'zeroth': 1}], ... [{'letter': 'a'}, {'letter': 'b'}], ... [{'name': 'susie', 'age': 8}, {'name': 'billy', 'age': 6}]) ... >>> job = _get_call_args(job_spec, 2) >>> job # doctest: +SKIP {'age': 8, 'letter': 'b', 'name': 'susie', 'ordinal': 1, 'zeroth': 0} >>> notmeta = {k: v for k, v in job.items() if k != 'metadata'} >>> meta = job['metadata'] >>> sorted(zip(notmeta.keys(), notmeta.values())) \ # doctest: +NORMALIZE_WHITESPACE [('age', 8), ('letter', 'b'), ('name', 'susie'), ('ordinal', 1), ('zeroth', 0)] >>> sorted(zip(meta.keys(), meta.values())) \ # doctest: +NORMALIZE_WHITESPACE [('age', '8'), ('letter', 'b'), ('name', 'susie'), ('ordinal', '1'), ('zeroth', '0')] ''' job = get_job_by_index(job_spec, index) metadata = {} metadata.update({k: str(v) for k, v in job.items()}) call_args = {'metadata': metadata} call_args.update(job) return call_args @toolz.curry def slurm_runner( run_job, job_spec, filepath=None, onfinish=None, return_index=False): ''' Decorator to create a SLURM runner job management command-line application Parameters ---------- run_job : function Function executed for each task specified by ``job_spec``. ``run_job`` must be of the form ``run_job(metadata, interactive=False, **kwargs)`` where ``kwargs`` is the set of keyword terms specified by ``job_spec``. job_spec : tuple of lists of dicts Job specification in the format ``([{kwargs: vals}, ...], [...], )``. ``slurm_runner`` will iterate through all combinations of the lists in ``job_spec``, combining paired kwarg dictionaries and passing them as arguments to ``run_job``. filepath : str, optional Path to file to call when running tasks. By default (None), slurm_runner infers the filepath from the location of ``run_job``. onfinish : function, optional Provide a function to call when all jobs have been completed. Default (None) takes no action. 
return_index : bool, optional Adds a ``task_id`` argument to run_job call with 0-indexed ID of current task Returns ------- slurm_runner : click.Group A SLURM runner job management command-line application ''' if filepath is None: filepath = os.path.abspath(inspect.getfile(run_job)) else: warning = ( "the `filepath` argument is deprecated and will be " + "removed in the future.") warnings.warn(warning, FutureWarning) @click.group() def slurm(): pass @slurm.command() @click.option( '--limit', '-l', type=int, required=False, default=None, help='Number of iterations to run') @click.option( '--jobs_per_node', '-n', type=int, required=False, default=24, help='Number of jobs to run per node') @click.option( '--maxnodes', '-x', type=int, required=False, default=100, help='Number of nodes to request for this job') @click.option( '--jobname', '-j', default='test', help='name of the job') @click.option( '--partition', '-p', default='savio2', help='resource on which to run') @click.option('--dependency', '-d', type=int, multiple=True) @click.option( '--logdir', '-L', default='log', help='Directory to write log files') @click.option( '--uniqueid', '-u', default='"${SLURM_ARRAY_JOB_ID}"', help='Unique job pool id') def prep( limit=None, jobs_per_node=24, jobname='slurm_job', dependency=None, partition='savio2', maxnodes=100, logdir='log', uniqueid='"${SLURM_ARRAY_JOB_ID}"'): _prep_slurm( filepath=filepath, jobname=jobname, partition=partition, job_spec=job_spec, jobs_per_node=jobs_per_node, maxnodes=maxnodes, limit=limit, uniqueid=uniqueid, logdir=logdir, dependencies=('afterany', list(dependency))) @slurm.command() @click.option( '--limit', '-l', type=int, required=False, default=None, help='Number of iterations to run') @click.option( '--jobs_per_node', '-n', type=int, required=False, default=24, help='Number of jobs to run per node') @click.option( '--maxnodes', '-x', type=int, required=False, default=100, help='Number of nodes to request for this job') @click.option( '--jobname', '-j', default='test', help='name of the job') @click.option( '--partition', '-p', default='savio2', help='resource on which to run') @click.option( '--dependency', '-d', type=int, multiple=True) @click.option( '--logdir', '-L', default='log', help='Directory to write log files') @click.option( '--uniqueid', '-u', default='"${SLURM_ARRAY_JOB_ID}"', help='Unique job pool id') def run( limit=None, jobs_per_node=24, jobname='slurm_job', dependency=None, partition='savio2', maxnodes=100, logdir='log', uniqueid='"${SLURM_ARRAY_JOB_ID}"'): if not os.path.isdir(logdir): os.makedirs(logdir) slurm_id = run_slurm( filepath=filepath, jobname=jobname, partition=partition, job_spec=job_spec, jobs_per_node=jobs_per_node, maxnodes=maxnodes, limit=limit, uniqueid=uniqueid, logdir=logdir, dependencies=('afterany', list(dependency))) finish_id = run_slurm( filepath=filepath, jobname=jobname+'_finish', partition=partition, dependencies=('afterany', [slurm_id]), logdir=logdir, flags=['cleanup', slurm_id]) print('run job: {}\non-finish job: {}'.format(slurm_id, finish_id)) @slurm.command() @click.argument('slurm_id') def cleanup(slurm_id): proc = subprocess.Popen( [ 'sacct', '-j', slurm_id, '--format=JobID,JobName,MaxRSS,Elapsed,State'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() print(out) if onfinish: onfinish() @slurm.command() @click.option('--job_name', required=True) @click.option('--job_id', required=True) @click.option('--num_jobs', required=True, type=int) @click.option( '--logdir', '-L', 
default='log', help='Directory to write log files') def do_job(job_name, job_id, num_jobs=None, logdir='log'): if not os.path.isdir('locks'): os.makedirs('locks') if not os.path.isdir(logdir): os.makedirs(logdir) for task_id in range(num_jobs): lock_file = ( 'locks/{}-{}-{}.{{}}' .format(job_name, job_id, task_id)) if os.path.exists(lock_file.format('done')): print('{} already done. skipping'.format(task_id)) continue elif os.path.exists(lock_file.format('err')): print('{} previously errored. skipping'.format(task_id)) continue try: with exclusive_open(lock_file.format('lck')): pass # Check for race conditions if os.path.exists(lock_file.format('done')): print('{} already done. skipping'.format(task_id)) if os.path.exists(lock_file.format('lck')): os.remove(lock_file.format('lck')) continue elif os.path.exists(lock_file.format('err')): print('{} previously errored. skipping'.format(task_id)) if os.path.exists(lock_file.format('lck')): os.remove(lock_file.format('lck')) continue except OSError: print('{} already in progress. skipping'.format(task_id)) continue handler = logging.FileHandler(os.path.join( logdir, 'run-{}-{}-{}.log'.format(job_name, job_id, task_id))) handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) logger.addHandler(handler) try: job_kwargs = _get_call_args(job_spec, task_id) if return_index: job_kwargs.update({'task_id': task_id}) logger.debug('Beginning job\nkwargs:\t{}'.format( pprint.pformat(job_kwargs['metadata'], indent=2))) run_job(**job_kwargs) except (KeyboardInterrupt, SystemExit): try: logger.error('{} interupted, removing .lck file before exiting'.format(task_id)) os.remove(lock_file.format('lck')) except: pass raise except Exception as e: logger.error( 'Error encountered in job {} {} {}' .format(job_name, job_id, task_id), exc_info=e) with open(lock_file.format('err'), 'w+'): pass else: with open(lock_file.format('done'), 'w+'): pass finally: if os.path.exists(lock_file.format('lck')): os.remove(lock_file.format('lck')) logger.removeHandler(handler) @slurm.command() @click.option('--job_name', '-j', required=True) @click.option('--job_id', '-u', required=True) def status(job_name, job_id, num_jobs=None, logdir='log'): n = count_jobs(job_spec) locks = os.listdir('locks') count = int(math.log10(n)//1 + 1) locked = len([ i for i in range(n) if '{}-{}-{}.lck'.format(job_name, job_id, i) in locks]) done = len([ i for i in range(n) if '{}-{}-{}.done'.format(job_name, job_id, i) in locks]) err = len([ i for i in range(n) if '{}-{}-{}.err'.format(job_name, job_id, i) in locks]) print( ("\n".join(["{{:<15}}{{:{}d}}".format(count) for _ in range(4)])) .format( 'jobs:', n, 'done:', done, 'in progress:', locked, 'errored:', err)) @slurm.command() @click.option('--job_name', required=True) @click.option('--job_id', required=True) @click.option('--num_jobs', required=True, type=int) def wait(job_name, job_id, num_jobs=None): for task_id in range(num_jobs): while not os.path.exists( 'locks/{}-{}-{}.done' .format(job_name, job_id, task_id)): time.sleep(10) def run_interactive(task_id=0): job_kwargs = _get_call_args(job_spec, task_id) logger.debug('Beginning job\nkwargs:\t{}'.format( pprint.pformat(job_kwargs['metadata'], indent=2))) if return_index: job_kwargs.update({'task_id': task_id}) return run_job(interactive=True, **job_kwargs) slurm.run_interactive = run_interactive return slurm
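# Usage sketch: how a script might wire up `slurm_runner`, following the
# job_spec format documented in the docstrings above. The kwarg names
# ("model", "scenario") and the body of `run_job` are illustrative only.
JOB_SPEC = (
    [{'model': 'linear'}, {'model': 'quadratic'}],
    [{'scenario': 'low'}, {'scenario': 'high'}],
)


@slurm_runner(job_spec=JOB_SPEC)
def run_job(metadata, model, scenario, interactive=False, **kwargs):
    # Do the real work for one (model, scenario) combination here.
    print('running', metadata)


if __name__ == '__main__':
    # The decorated name is now the click group, exposing the `prep`, `run`,
    # `do_job`, `status`, `wait` and `cleanup` subcommands;
    # `run_job.run_interactive(task_id)` runs a single task locally for debugging.
    run_job()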
py
1a49ba171b288808e737ba2944307bbd6f412900
# The prime 41, can be written as the sum of six consecutive primes: # 41 = 2 + 3 + 5 + 7 + 11 + 13 # This is the longest sum of consecutive primes that adds to a prime # below one-hundred. # The longest sum of consecutive primes below one-thousand that adds # to a prime, contains 21 terms, and is equal to 953. # Which prime, below one-million, can be written as the sum of the # most consecutive primes? from math import sqrt from copy import copy def generate_primes(limit): is_prime = [True for i in range(limit)] for i in range(2, int(sqrt(limit))): if is_prime[i]: for j in range(i * i, limit, i): is_prime[j] = False return filter(lambda x: is_prime[x], range(2, limit)) primes = generate_primes(1000000) prime_set = set(primes) prime_sum = copy(primes) for i in range(1, len(primes)): prime_sum[i] += prime_sum[i - 1] prime_sum.insert(0, 0) ans = 0 left = 21 right = len(primes) + 1 for i in range(len(primes)): for j in range(i + left, right): p = prime_sum[j] - prime_sum[i] if p > 1000000: right = j break if p in prime_set: if left < j - i: ans, left = p, j - i print ans
py
1a49ba4f681e1723ac7e30961e32745382b6ed45
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v1.15.9 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes_asyncio.client.configuration import Configuration class V1alpha1ClusterRoleBinding(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'role_ref': 'V1alpha1RoleRef', 'subjects': 'list[V1alpha1Subject]' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'role_ref': 'roleRef', 'subjects': 'subjects' } def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None, local_vars_configuration=None): # noqa: E501 """V1alpha1ClusterRoleBinding - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._role_ref = None self._subjects = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata self.role_ref = role_ref if subjects is not None: self.subjects = subjects @property def api_version(self): """Gets the api_version of this V1alpha1ClusterRoleBinding. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1alpha1ClusterRoleBinding. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1alpha1ClusterRoleBinding. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1alpha1ClusterRoleBinding. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1alpha1ClusterRoleBinding. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1alpha1ClusterRoleBinding. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1alpha1ClusterRoleBinding. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1alpha1ClusterRoleBinding. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1alpha1ClusterRoleBinding. # noqa: E501 :return: The metadata of this V1alpha1ClusterRoleBinding. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1alpha1ClusterRoleBinding. :param metadata: The metadata of this V1alpha1ClusterRoleBinding. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def role_ref(self): """Gets the role_ref of this V1alpha1ClusterRoleBinding. # noqa: E501 :return: The role_ref of this V1alpha1ClusterRoleBinding. # noqa: E501 :rtype: V1alpha1RoleRef """ return self._role_ref @role_ref.setter def role_ref(self, role_ref): """Sets the role_ref of this V1alpha1ClusterRoleBinding. :param role_ref: The role_ref of this V1alpha1ClusterRoleBinding. # noqa: E501 :type: V1alpha1RoleRef """ if self.local_vars_configuration.client_side_validation and role_ref is None: # noqa: E501 raise ValueError("Invalid value for `role_ref`, must not be `None`") # noqa: E501 self._role_ref = role_ref @property def subjects(self): """Gets the subjects of this V1alpha1ClusterRoleBinding. # noqa: E501 Subjects holds references to the objects the role applies to. # noqa: E501 :return: The subjects of this V1alpha1ClusterRoleBinding. # noqa: E501 :rtype: list[V1alpha1Subject] """ return self._subjects @subjects.setter def subjects(self, subjects): """Sets the subjects of this V1alpha1ClusterRoleBinding. Subjects holds references to the objects the role applies to. # noqa: E501 :param subjects: The subjects of this V1alpha1ClusterRoleBinding. # noqa: E501 :type: list[V1alpha1Subject] """ self._subjects = subjects def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1alpha1ClusterRoleBinding): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1alpha1ClusterRoleBinding): return True return self.to_dict() != other.to_dict()
py
1a49ba5db1b3f1a3780250639515fd385d2cda86
from flask_login import UserMixin
from werkzeug.security import check_password_hash, generate_password_hash

from app import db


class User(UserMixin, db.Model):
    """User account model with login support and up to five pickled characters."""

    __tablename__ = "users"
    __table_args__ = {'extend_existing': True}

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String)
    password = db.Column(db.String)
    character_1 = db.Column(db.PickleType)
    character_2 = db.Column(db.PickleType)
    character_3 = db.Column(db.PickleType)
    character_4 = db.Column(db.PickleType)
    character_5 = db.Column(db.PickleType)

    def __init__(self, username):
        self.username = username

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Create hashed password."""
        self.password = generate_password_hash(
            password,
            method='sha256'
        )

    def check_password(self, password):
        """Check hashed password."""
        return check_password_hash(self.password, password)
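# Usage sketch (not called anywhere): create a user and verify a password with
# the helpers above. Assumes an application context and an initialized `db`;
# the Flask app setup itself is outside this module.
def _example_usage():
    user = User(username="alice")
    user.set_password("correct horse battery staple")
    db.session.add(user)
    db.session.commit()
    assert user.check_password("correct horse battery staple")
    assert not user.check_password("wrong guess")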
py
1a49badedef9092f9c65208eef889a5fda00d98d
import graphene
from graphene.relay import Node
from graphene_mongo import MongoengineConnectionField, MongoengineObjectType

from .models import Company as CompanyModel
from .models import Tool as ToolModel


class Companies(MongoengineObjectType):

    class Meta:
        model = CompanyModel
        interfaces = (Node,)


class Tools(MongoengineObjectType):

    class Meta:
        model = ToolModel
        interfaces = (Node,)


class Query(graphene.ObjectType):
    node = Node.Field()
    all_companies = MongoengineConnectionField(Companies)
    all_tools = MongoengineConnectionField(Tools)
    company_by_name = graphene.Field(Companies, name=graphene.String(required=True))
    tool_by_name = graphene.Field(Tools, name=graphene.String(required=True))
    tool_by_slug = graphene.Field(Tools, slug=graphene.String(required=True))

    def resolve_company_by_name(root, info, name):
        try:
            return CompanyModel.objects.get(companyName=name)
        except CompanyModel.DoesNotExist:
            return None

    def resolve_tool_by_name(root, info, name):
        try:
            return ToolModel.objects.get(name=name)
        except ToolModel.DoesNotExist:
            return None

    def resolve_tool_by_slug(root, info, slug):
        try:
            return ToolModel.objects.get(slug=slug)
        except ToolModel.DoesNotExist:
            return None


schema = graphene.Schema(query=Query, types=[Companies, Tools])
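# Usage sketch: executing a query against the schema above. Field names are the
# camelCased forms graphene derives from the snake_case fields on Query; the
# selected sub-field ("name") assumes the mongoengine Tool document defines it,
# which this module only implies through its resolvers.
#
#     result = schema.execute('{ toolBySlug(slug: "some-tool") { name } }')
#     print(result.errors or result.data)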
py
1a49baee925cd9e1f81284d9078b5cff1bf6af00
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thrusday 08 Feb 2018 Testing suite for topopy Flow class @author: J. Vicente Perez @email: [email protected] """ import unittest import sys import numpy as np # Add to the path code folder and data folder sys.path.append("../") from topopy import Grid, Flow, Network class CreateNetwork(unittest.TestCase): def test_create_load(self): dem_files = ['tunez', 'tunez2', 'small25'] for filename in dem_files: fd = Flow("data/fd_{0}.tif".format(filename)) st = Network(fd, 1000) computed = [st.get_dims(), st.get_size(), st.get_ncells(), st.get_cellsize(), st.get_geotransform(), st.get_projection()] expected = [fd.get_dims(), fd.get_size(), fd.get_ncells(), fd.get_cellsize(), fd.get_geotransform(), fd.get_projection()] self.assertTrue(computed, expected) def test_streams(self): dem_files = ['tunez', 'tunez2', 'small25'] for filename in dem_files: fd = Flow("data/fd_{0}.tif".format(filename)) st = Network(fd, 1000) streams = st.get_streams() st01 = streams.read_array() st02 = Grid("data/str_{0}.tif".format(filename)).read_array() self.assertTrue(np.array_equal(st01, st02), True) def test_streampoi(self): dem_files = ['tunez', 'tunez2', 'small25'] for filename in dem_files: fd = Flow("data/fd_{0}.tif".format(filename)) st = Network(fd, 1000) kinds = ['heads', 'confluences', 'outlets'] for kind in kinds: poi = st.get_stream_poi(kind) rows = poi[0].reshape((poi[0].size, 1)) cols = poi[1].reshape((poi[1].size, 1)) comp_poi = np.append(rows, cols, axis=1) exp_poi = np.load("data/mlab_files/{0}_{1}.npy".format(filename, kind)) self.assertEqual(np.array_equal(comp_poi, exp_poi), True) def test_stream_segments(self): dem_files = ['tunez', 'tunez2', 'small25'] for filename in dem_files: fd = Flow("data/fd_{0}.tif".format(filename)) st = Network(fd, 1000) ssegments = st.get_stream_segments(False) esegments = Grid("data/mlab_files/{0}_segments.tif".format(filename)).read_array() self.assertTrue(np.array_equal(ssegments, esegments), True) def test_stream_order(self): dem_files = ['tunez', 'tunez2', 'small25'] for filename in dem_files: fd = Flow("data/fd_{0}.tif".format(filename)) st = Network(fd, 1000) for kind in ['strahler', 'shreeve']: exp_order = st.get_stream_order(kind = kind, asgrid=False) cmp_order = Grid("data/mlab_files/{0}_{1}.tif".format(filename, kind)).read_array() self.assertTrue(np.array_equal(exp_order, cmp_order), True) if __name__ == "__main__": unittest.main()
py
1a49bb66d91e9f77ce92695c6cc57b334c778039
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import sys, glob sys.path.insert(0, './gen-py') lib_path = glob.glob('../../lib/py/build/lib.*') if lib_path: sys.path.insert(0, lib_path[0]) if sys.version_info[0] >= 3: xrange = range from ThriftTest import ThriftTest from ThriftTest.ttypes import * from thrift.transport import TTransport from thrift.transport import TSocket from thrift.protocol import TBinaryProtocol import unittest import time import socket import random from optparse import OptionParser class TimeoutTest(unittest.TestCase): def setUp(self): for i in xrange(50): try: # find a port we can use self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = random.randint(10000, 30000) self.listen_sock.bind(('localhost', self.port)) self.listen_sock.listen(5) break except Exception: if i == 49: raise def testConnectTimeout(self): starttime = time.time() try: leaky = [] for _ in xrange(100): socket = TSocket.TSocket('localhost', self.port) socket.setTimeout(10) socket.open() leaky.append(socket) except Exception: self.assert_(time.time() - starttime < 5.0) def testWriteTimeout(self): starttime = time.time() try: socket = TSocket.TSocket('localhost', self.port) socket.setTimeout(10) socket.open() self.listen_sock.accept() while True: socket.write("hi" * 100) except Exception: self.assert_(time.time() - starttime < 5.0) def suite(): suite = unittest.TestSuite() loader = unittest.TestLoader() suite.addTest(loader.loadTestsFromTestCase(TimeoutTest)) return suite if __name__ == "__main__": testRunner = unittest.TextTestRunner(verbosity=2) testRunner.run(suite())
py
1a49bb786aa3bba58b2eabf32dc3eb857c529556
# coding=utf-8 """Module for exposing patreon subscribers as a list of rooms and mxids entitled to those rooms.""" from collections import namedtuple from collections import defaultdict from ps3.addressbook.AddressBook import AddressBook from ps3.subscribers.patreon.PatreonClient import Patreon Reward = namedtuple('Reward', ['room_alias', 'minimum_donation']) REWARDS = [Reward('#linear-supporters:matrix.org', 100), Reward('#quadratic-supporters:matrix.org', 500), Reward('#polynomial-supporters:matrix.org', 1000), Reward('#elliptic-supporters:matrix.org', 5000)] class PatreonGuestList(object): """Filters patreon users into room guest lists.""" def __init__(self, patroniser_bot, creator_access_token): self._patroniser_bot = patroniser_bot self._client = Patreon(creator_access_token) self._address_book = AddressBook() def _patreon_users_guest_list(self, campaign_id): """Return a dict of room names to tuples of (entitled members, a flag indicating whether this is the top tier they qualify for)""" guest_list = {reward.room_alias: [] for reward in REWARDS} for patron in self._client.patrons(campaign_id): top_reward_tier = max([reward for reward in REWARDS if patron.amount >= reward.minimum_donation], key=lambda r: r.minimum_donation) for reward in REWARDS: if patron.active and patron.amount >= reward.minimum_donation: guest_list[reward.room_alias].append((patron, reward==top_reward_tier)) return guest_list def guest_list(self, campaign_id, skip_lookup=False, create_room=True, verbose=False): mxid_guest_list = defaultdict(list) # Resolving guests via Matrix is s l o w; we end up doing it for each # instance of the guest for each room they're entitled to be in. # Rather than do the obvious restructuring this could benefit from, # let's just track the guests we've already looked up via Matrix once # this go and not bother to look them up again. looked_up_guests = [] for room, guests in self._patreon_users_guest_list(campaign_id).iteritems(): for (guest, is_top_tier) in guests: mxids = self._address_book.get_mxids(guest.pid) if len(mxids) > 0: if verbose: print 'PA: Found Patreon "%s" in address book; resolved to %s' % (guest.name, ','.join(mxids)) for mxid in mxids: mxid_guest_list[room].append((mxid, is_top_tier)) elif skip_lookup: print 'PA: Couldn\'t find Patreon "%s" in address book; skipping matrix lookup' % guest.name elif guest.pid in looked_up_guests: print 'PA: Couldn\'t find Patreon "%s" in address book; already looked up via Matrix this session so skipping for now.' % guest.name else: looked_up_guests.append(guest.pid) mxid = self._patroniser_bot.get_mxid_from_patron(guest, verbose, create_room=create_room) if mxid != None: print 'PA: Patreon missing from address book; Newly resolved Patreon "%s" to %s' % (guest.name, mxid) self._address_book.add(guest.pid, mxid) mxid_guest_list[room].append((mxid, is_top_tier)) else: print 'PA: Patreon missing from address book and has not yet joined their lookup room: %s, %s, %s, %s' % (guest.name, guest.pid, guest.email, guest.amount) return mxid_guest_list
py
1a49bc955469de8ffa5b9fba3bb5e11ae6102101
import logging import os import tempfile from contextlib import contextmanager from typing import TYPE_CHECKING, Optional from funcy import cached_property, first from dvc import fs from dvc.exceptions import DvcException from dvc.utils import dict_sha256, relpath from dvc_data.transfer import _log_exceptions if TYPE_CHECKING: from dvc_objects.db import ObjectDB logger = logging.getLogger(__name__) class RunCacheNotFoundError(DvcException): def __init__(self, stage): super().__init__(f"No run-cache for {stage.addressing}") def _get_cache_hash(cache, key=False): from dvc_objects.meta import Meta if key: cache["outs"] = [out["path"] for out in cache.get("outs", [])] return dict_sha256(cache, exclude=[Meta.PARAM_SIZE, Meta.PARAM_NFILES]) def _can_hash(stage): if stage.is_callback or stage.always_changed: return False if not all([stage.cmd, stage.deps, stage.outs]): return False for dep in stage.deps: if not (dep.protocol == "local" and dep.def_path and dep.get_hash()): return False for out in stage.outs: if out.protocol != "local" or not out.def_path or out.persist: return False return True def _get_stage_hash(stage): from .serialize import to_single_stage_lockfile assert _can_hash(stage) return _get_cache_hash(to_single_stage_lockfile(stage), key=True) class StageCache: def __init__(self, repo): self.repo = repo @cached_property def cache_dir(self): return os.path.join(self.repo.odb.local.cache_dir, "runs") def _get_cache_dir(self, key): return os.path.join(self.cache_dir, key[:2], key) def _get_cache_path(self, key, value): return os.path.join(self._get_cache_dir(key), value) def _load_cache(self, key, value): from voluptuous import Invalid from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA from dvc.utils.serialize import YAMLFileCorruptedError, load_yaml path = self._get_cache_path(key, value) try: return COMPILED_LOCK_FILE_STAGE_SCHEMA(load_yaml(path)) except FileNotFoundError: return None except (YAMLFileCorruptedError, Invalid): logger.warning("corrupted cache file '%s'.", relpath(path)) os.unlink(path) return None def _load(self, stage): key = _get_stage_hash(stage) if not key: return None cache_dir = self._get_cache_dir(key) if not os.path.exists(cache_dir): return None for value in os.listdir(cache_dir): cache = self._load_cache(key, value) if cache: return cache return None def _create_stage(self, cache, wdir=None): from . import PipelineStage, create_stage from .loader import StageLoader stage = create_stage( PipelineStage, repo=self.repo, path="dvc.yaml", cmd=cache["cmd"], wdir=wdir, outs=[out["path"] for out in cache["outs"]], external=True, ) StageLoader.fill_from_lock(stage, cache) return stage @contextmanager def _cache_type_copy(self): cache_types = self.repo.odb.local.cache_types self.repo.odb.local.cache_types = ["copy"] try: yield finally: self.repo.odb.local.cache_types = cache_types def _uncached_outs(self, stage, cache): # NOTE: using temporary stage to avoid accidentally modifying original # stage and to workaround `commit/checkout` not working for uncached # outputs. 
cached_stage = self._create_stage(cache, wdir=stage.wdir) outs_no_cache = [ out.def_path for out in stage.outs if not out.use_cache ] # NOTE: using copy link to make it look like a git-tracked file with self._cache_type_copy(): for out in cached_stage.outs: if out.def_path in outs_no_cache: yield out def save(self, stage): from .serialize import to_single_stage_lockfile if not _can_hash(stage): return cache_key = _get_stage_hash(stage) cache = to_single_stage_lockfile(stage) cache_value = _get_cache_hash(cache) existing_cache = self._load_cache(cache_key, cache_value) cache = existing_cache or cache for out in self._uncached_outs(stage, cache): out.commit() if existing_cache: return from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA from dvc.utils.serialize import dump_yaml # sanity check COMPILED_LOCK_FILE_STAGE_SCHEMA(cache) path = self._get_cache_path(cache_key, cache_value) parent = self.repo.odb.local.fs.path.parent(path) self.repo.odb.local.makedirs(parent) tmp = tempfile.NamedTemporaryFile(delete=False, dir=parent).name assert os.path.exists(parent) assert os.path.isdir(parent) dump_yaml(tmp, cache) self.repo.odb.local.move(tmp, path) def restore(self, stage, run_cache=True, pull=False): from .serialize import to_single_stage_lockfile if not _can_hash(stage): raise RunCacheNotFoundError(stage) if ( not stage.changed_stage() and stage.deps_cached() and all(bool(out.hash_info) for out in stage.outs) ): cache = to_single_stage_lockfile(stage) else: if not run_cache: # backward compatibility raise RunCacheNotFoundError(stage) stage.save_deps() cache = self._load(stage) if not cache: raise RunCacheNotFoundError(stage) cached_stage = self._create_stage(cache, wdir=stage.wdir) if pull: for objs in cached_stage.get_used_objs().values(): self.repo.cloud.pull(objs) if not cached_stage.outs_cached(): raise RunCacheNotFoundError(stage) logger.info( "Stage '%s' is cached - skipping run, checking out outputs", stage.addressing, ) cached_stage.checkout() def transfer(self, from_odb, to_odb): from dvc.fs.callbacks import Callback from_fs = from_odb.fs to_fs = to_odb.fs func = _log_exceptions(fs.generic.copy) runs = from_fs.path.join(from_odb.fs_path, "runs") ret = [] if not from_fs.exists(runs): return ret for src in from_fs.find(runs): rel = from_fs.path.relpath(src, from_odb.fs_path) dst = to_fs.path.join(to_odb.fs_path, rel) key = to_fs.path.parent(dst) # check if any build cache already exists for this key # TODO: check if MaxKeys=1 or something like that applies # or otherwise this will take a lot of time! 
if to_fs.exists(key) and first(to_fs.find(key)): continue src_name = from_fs.path.name(src) parent_name = from_fs.path.name(from_fs.path.parent(src)) with Callback.as_tqdm_callback( desc=src_name, bytes=True, ) as cb: func(from_fs, src, to_fs, dst, callback=cb) ret.append((parent_name, src_name)) return ret def push(self, remote: Optional[str], odb: Optional["ObjectDB"] = None): dest_odb = odb or self.repo.cloud.get_remote_odb(remote) return self.transfer(self.repo.odb.local, dest_odb) def pull(self, remote: Optional[str], odb: Optional["ObjectDB"] = None): odb = odb or self.repo.cloud.get_remote_odb(remote) return self.transfer(odb, self.repo.odb.local) def get_used_objs(self, used_run_cache, *args, **kwargs): """Return used cache for the specified run-cached stages.""" from collections import defaultdict used_objs = defaultdict(set) for key, value in used_run_cache: entry = self._load_cache(key, value) if not entry: continue stage = self._create_stage(entry) for odb, objs in stage.get_used_objs(*args, **kwargs).items(): used_objs[odb].update(objs) return used_objs
py
1a49bce80342663553dc0b25999bab33143415db
# -*- coding: utf-8 -*-

import getopt
import sys

import matplotlib.pyplot as plt

from simulation import Simulation


def main(argv):
    population_size = 20
    individual_size = 8
    delta = 0.005
    cross_point_count = 1
    verbose = False

    try:
        opts, args = getopt.getopt(argv, 'hvp:i:c:d:', [
            'help',
            'verbose',
            'population-size=',
            'individual-size=',
            'cross-points=',
            'delta='
        ])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        if opt in ('-v', '--verbose'):
            verbose = True
        if opt in ('-p', '--population-size'):
            population_size = int(arg)
        if opt in ('-i', '--individual-size'):
            individual_size = int(arg)
        if opt in ('-c', '--cross-points'):
            cross_point_count = int(arg)
        if opt in ('-d', '--delta'):
            delta = float(arg)

    print("pop size = {}, ind size = {}, cross points = {}, delta = {}".format(
        population_size, individual_size, cross_point_count, delta))
    simulation = Simulation(
        population_size, individual_size, cross_point_count, delta)
    simulation.run_simulation()

    if verbose:
        print(simulation.population)

    # Plotting
    x_values = range(0, len(simulation.global_fitness_records))
    y_values = [
        simulation.global_fitness_records,
        simulation.convergence_values
    ]
    labels = ["Average fitness score", "Convergence values"]

    for y_array, label in zip(y_values, labels):
        plt.plot(x_values, y_array, label=label)

    plt.legend()
    plt.show()


def usage():
    print("""
Usage: python genetic.py [OPTIONS]

You can specify the following options:
    -v, --verbose:
        If specified, the last population will be printed out in the terminal.

    -p, --population-size [integer]:
        The size of the population that will be used in the genetic algorithm.

    -i, --individual-size [integer]
        The number of genes of each individual member of the population.

    -c, --cross-points [integer]
        The number of crossover points used when breeding individuals.

    -d, --delta [float]
        The threshold used for convergence detection. The smaller this value
        is, the longer the algorithm takes to stop, but the higher the average
        fitness value will be. This value must be less than 1.
    """)


if __name__ == "__main__":
    main(sys.argv[1:])
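# Example invocation (illustrative values):
#
#     python genetic.py --population-size 50 --individual-size 16 \
#         --cross-points 2 --delta 0.001 --verbose
#
# or, equivalently, the short form: python genetic.py -p 50 -i 16 -c 2 -d 0.001 -v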
py
1a49bd9fa7d7163f7acb063a111faa1fc3faef30
from proteus import * from twp_navier_stokes_p import * from dambreak_Ubbink_coarse import * if timeDiscretization=='vbdf': timeIntegration = VBDF timeOrder=2 stepController = Min_dt_cfl_controller elif timeDiscretization=='flcbdf': timeIntegration = FLCBDF #stepController = FLCBDF_controller_sys stepController = Min_dt_cfl_controller time_tol = 10.0*ns_nl_atol_res atol_u = {1:time_tol,2:time_tol,3:time_tol} rtol_u = {1:time_tol,2:time_tol,3:time_tol} else: timeIntegration = BackwardEuler_cfl stepController = Min_dt_cfl_controller femSpaces = {0:basis, 1:basis, 2:basis, 3:basis} massLumping = False numericalFluxType = None conservativeFlux = None numericalFluxType = RANS2P.NumericalFlux subgridError = RANS2P.SubgridError(coefficients,nd,lag=ns_lag_subgridError,hFactor=hFactor) shockCapturing = RANS2P.ShockCapturing(coefficients,nd,ns_shockCapturingFactor,lag=ns_lag_shockCapturing) fullNewtonFlag = True multilevelNonlinearSolver = Newton levelNonlinearSolver = Newton nonlinearSmoother = None linearSmoother = SimpleNavierStokes3D matrix = SparseMatrix if useOldPETSc: multilevelLinearSolver = PETSc levelLinearSolver = PETSc else: multilevelLinearSolver = KSP_petsc4py levelLinearSolver = KSP_petsc4py if useSuperlu: multilevelLinearSolver = LU levelLinearSolver = LU linear_solver_options_prefix = 'rans2p_' levelNonlinearSolverConvergenceTest = 'r' linearSolverConvergenceTest = 'r-true' tolFac = 0.0 linTolFac = 0.01 l_atol_res = 0.01*vof_nl_atol_res nl_atol_res = ns_nl_atol_res useEisenstatWalker = False maxNonlinearIts = 50 maxLineSearches = 0 conservativeFlux = {0:'pwl-bdm-opt'} #auxiliaryVariables=[pointGauges,lineGauges]
py
1a49bf56da80dd8da63a7c47b577b0080462b5e4
# Copyright 2019 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Protocol for objects that are mixtures (probabilistic combinations).""" from typing import Any, Sequence, Tuple, Union import numpy as np from typing_extensions import Protocol from cirq.protocols.unitary import has_unitary from cirq.type_workarounds import NotImplementedType # This is a special indicator value used by the inverse method to determine # whether or not the caller provided a 'default' argument. RaiseTypeErrorIfNotProvided = ((0.0, []),) # type: Sequence[Tuple[float, Any]] class SupportsMixture(Protocol): """An object that may be describable as a probabilistic combination. """ def _mixture_(self) -> Union[ Sequence[Tuple[float, Any]], NotImplementedType]: """Return the probabilistic mixture. A mixture is described by an iterable of tuples of the form (probability of object, object) The probability components of the tuples must sum to 1.0 and be between 0 and 1 (inclusive). Returns: A tuple of (probability of object, object) """ def _has_mixture_(self) -> bool: """Whether this value has a mixture representation. This method is used by the global `cirq.has_mixture` method. If this method is not present, or returns NotImplemented, it will fallback to using _mixture_ with a default value, or False if neither exist. Returns: True if the value has a mixture representation, Falseotherwise. """ def mixture( val: Any, default: Any = RaiseTypeErrorIfNotProvided) -> Sequence[Tuple[float, Any]]: """Return a sequence of tuples representing a probabilistic combination. A mixture is described by an iterable of tuples of the form (probability of object, object) The probability components of the tuples must sum to 1.0 and be between 0 and 1 (inclusive). Args: val: The value whose mixture is being computed. default: A default value if val does not support mixture. Returns: An iterable of tuples of size 2. The first element of the tuple is a probability (between 0 and 1) and the second is the object that occurs with that probability in the mixture. The probabilities will sum to 1.0. """ getter = getattr(val, '_mixture_', None) result = NotImplemented if getter is None else getter() if result is not NotImplemented: return result if default is not RaiseTypeErrorIfNotProvided: return default if getter is None: raise TypeError( "object of type '{}' has no _mixture_ method.".format(type(val))) raise TypeError("object of type '{}' does have a _mixture_ method, " "but it returned NotImplemented.".format(type(val))) def has_mixture(val: Any) -> bool: """Returns whether the value has a mixture representation. Returns: If `val` has a `_has_mixture_` method and its result is not NotImplemented, that result is returned. Otherwise, if the value has a `_mixture_` method return True if that has a non-default value. Returns False if neither function exists. 
""" getter = getattr(val, '_has_mixture_', None) result = NotImplemented if getter is None else getter() if result is not NotImplemented: return result # No _has_mixture_ function, use _mixture_ instead return mixture(val, None) is not None def mixture_channel( val: Any, default: Any = RaiseTypeErrorIfNotProvided) -> Sequence[ Tuple[float, np.ndarray]]: """Return a sequence of tuples for a channel that is a mixture of unitaries. In contrast to `mixture` this method falls back to `unitary` if `_mixture_` is not implemented. A mixture channel is described by an iterable of tuples of the form (probability of unitary, unitary) The probability components of the tuples must sum to 1.0 and be between 0 and 1 (inclusive) and the `unitary` must be a unitary matrix. Args: val: The value whose mixture_channel is being computed. default: A default value if val does not support mixture. Returns: An iterable of tuples of size 2. The first element of the tuple is a probability (between 0 and 1) and the second is the unitary that occurs with that probability. The probabilities will sum to 1.0. """ mixture_getter = getattr(val, '_mixture_', None) result = NotImplemented if mixture_getter is None else mixture_getter() if result is not NotImplemented: return result unitary_getter = getattr(val, '_unitary_', None) result = NotImplemented if unitary_getter is None else unitary_getter() if result is not NotImplemented: return ((1.0, result),) if default is not RaiseTypeErrorIfNotProvided: return default if mixture_getter is None and unitary_getter is None: raise TypeError( "object of type '{}' has no _mixture_ or _unitary_ method." .format(type(val))) raise TypeError("object of type '{}' does have a _mixture_ or _unitary_ " "method, but it returned NotImplemented.".format(type(val))) def has_mixture_channel(val: Any) -> bool: """Returns whether the value has a mixture channel representation. In contrast to `has_mixture` this method falls back to checking whether the value has a unitary representation via `has_channel`. Returns: If `val` has a `_has_mixture_` method and its result is not NotImplemented, that result is returned. Otherwise, if `val` has a `_has_unitary_` method and its results is not NotImplemented, that result is returned. Otherwise, if the value has a `_mixture_` method that is not a non-default value, True is returned. Returns False if none of these functions. """ mixture_getter = getattr(val, '_has_mixture_', None) result = NotImplemented if mixture_getter is None else mixture_getter() if result is not NotImplemented: return result result = has_unitary(val) if result is not NotImplemented and result: return result # No _has_mixture_ or _has_unitary_ function, use _mixture_ instead. return mixture_channel(val, None) is not None def validate_mixture(supports_mixture: SupportsMixture): """Validates that the mixture's tuple are valid probabilities.""" mixture_tuple = mixture(supports_mixture, None) if mixture_tuple is None: raise TypeError('{}_mixture did not have a _mixture_ method'.format( supports_mixture)) def validate_probability(p, p_str): if p < 0: raise ValueError('{} was less than 0.'.format(p_str)) elif p > 1: raise ValueError('{} was greater than 1.'.format(p_str)) total = 0.0 for p, val in mixture_tuple: validate_probability(p, '{}\'s probability'.format(str(val))) total += p if not np.isclose(total, 1.0): raise ValueError('Sum of probabilities of a mixture was not 1.0')
py
1a49bfd0cb96ae52961624c2c698a4c15347bfb0
from huobi.client.trade import TradeClient from huobi.constant import * from huobi.utils import * import time symbol_test = "eosusdt" client_order_id_header = str(int(time.time())) client_order_id_test = "client_" + client_order_id_header +"_order" # unique id in 24hours account_id = g_account_id trade_client = TradeClient(api_key=g_api_key, secret_key=g_secret_key) order_id = trade_client.create_order(symbol=symbol_test, account_id=account_id, order_type=OrderType.BUY_LIMIT, source=OrderSource.API, amount=20, price=0.26, client_order_id=client_order_id_test, stop_price=0.11, operator="gte") LogInfo.output("======= create new order id : {order_id} with client id {client_id} =======".format(order_id=(order_id), client_id=client_order_id_test)) orderObj = trade_client.get_order(order_id=order_id) LogInfo.output("======= get order by order id : {order_id} =======".format(order_id=order_id)) orderObj.print_object() orderObj = trade_client.get_order_by_client_order_id(client_order_id=client_order_id_test) LogInfo.output("======= get order by client order id : {client_id} =======".format(client_id=client_order_id_test)) orderObj.print_object() trade_client.cancel_client_order(client_order_id=client_order_id_test) LogInfo.output("======= cancel order by client order id : {client_id} =======".format(client_id=client_order_id_test)) orderObj = trade_client.get_order_by_client_order_id(client_order_id=client_order_id_test) LogInfo.output("======= get order by client order id : {client_id} after cancel =======".format(client_id=client_order_id_test)) orderObj.print_object()
py
1a49bffd2f55e5c1d9db4c889111c1aed8f35159
"""Config flow for HVV integration.""" import logging from pygti.auth import GTI_DEFAULT_HOST from pygti.exceptions import CannotConnect, InvalidAuth import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_HOST, CONF_OFFSET, CONF_PASSWORD, CONF_USERNAME from homeassistant.core import callback from homeassistant.helpers import aiohttp_client import homeassistant.helpers.config_validation as cv from .const import ( # pylint:disable=unused-import CONF_FILTER, CONF_REAL_TIME, CONF_STATION, DOMAIN, ) from .hub import GTIHub _LOGGER = logging.getLogger(__name__) SCHEMA_STEP_USER = vol.Schema( { vol.Required(CONF_HOST, default=GTI_DEFAULT_HOST): str, vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str, } ) SCHEMA_STEP_STATION = vol.Schema({vol.Required(CONF_STATION): str}) SCHEMA_STEP_OPTIONS = vol.Schema( { vol.Required(CONF_FILTER): vol.In([]), vol.Required(CONF_OFFSET, default=0): cv.positive_int, vol.Optional(CONF_REAL_TIME, default=True): bool, } ) class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for HVV.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL def __init__(self): """Initialize component.""" self.hub = None self.data = None self.stations = {} async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: session = aiohttp_client.async_get_clientsession(self.hass) self.hub = GTIHub( user_input[CONF_HOST], user_input[CONF_USERNAME], user_input[CONF_PASSWORD], session, ) try: response = await self.hub.authenticate() _LOGGER.debug("Init gti: %r", response) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" if not errors: self.data = user_input return await self.async_step_station() return self.async_show_form( step_id="user", data_schema=SCHEMA_STEP_USER, errors=errors ) async def async_step_station(self, user_input=None): """Handle the step where the user inputs his/her station.""" if user_input is not None: errors = {} check_name = await self.hub.gti.checkName( {"theName": {"name": user_input[CONF_STATION]}, "maxList": 20} ) stations = check_name.get("results") self.stations = { f"{station.get('name')}": station for station in stations if station.get("type") == "STATION" } if not self.stations: errors["base"] = "no_results" return self.async_show_form( step_id="station", data_schema=SCHEMA_STEP_STATION, errors=errors ) # schema return await self.async_step_station_select() return self.async_show_form(step_id="station", data_schema=SCHEMA_STEP_STATION) async def async_step_station_select(self, user_input=None): """Handle the step where the user inputs his/her station.""" schema = vol.Schema({vol.Required(CONF_STATION): vol.In(list(self.stations))}) if user_input is None: return self.async_show_form(step_id="station_select", data_schema=schema) self.data.update({"station": self.stations[user_input[CONF_STATION]]}) title = self.data[CONF_STATION]["name"] return self.async_create_entry(title=title, data=self.data) @staticmethod @callback def async_get_options_flow(config_entry): """Get options flow.""" return OptionsFlowHandler(config_entry) class OptionsFlowHandler(config_entries.OptionsFlow): """Options flow handler.""" def __init__(self, config_entry): """Initialize HVV Departures options flow.""" self.config_entry = config_entry self.options = dict(config_entry.options) self.departure_filters = {} self.hub = None async def async_step_init(self, 
user_input=None): """Manage the options.""" errors = {} if not self.departure_filters: departure_list = {} self.hub = self.hass.data[DOMAIN][self.config_entry.entry_id] try: departure_list = await self.hub.gti.departureList( { "station": self.config_entry.data[CONF_STATION], "time": {"date": "heute", "time": "jetzt"}, "maxList": 5, "maxTimeOffset": 200, "useRealtime": True, "returnFilters": True, } ) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" if not errors: self.departure_filters = { str(i): departure_filter for i, departure_filter in enumerate(departure_list.get("filter")) } if user_input is not None and not errors: options = { CONF_FILTER: [ self.departure_filters[x] for x in user_input[CONF_FILTER] ], CONF_OFFSET: user_input[CONF_OFFSET], CONF_REAL_TIME: user_input[CONF_REAL_TIME], } return self.async_create_entry(title="", data=options) if CONF_FILTER in self.config_entry.options: old_filter = [ i for (i, f) in self.departure_filters.items() if f in self.config_entry.options.get(CONF_FILTER) ] else: old_filter = [] return self.async_show_form( step_id="init", data_schema=vol.Schema( { vol.Optional(CONF_FILTER, default=old_filter): cv.multi_select( { key: f"{departure_filter['serviceName']}, {departure_filter['label']}" for key, departure_filter in self.departure_filters.items() } ), vol.Required( CONF_OFFSET, default=self.config_entry.options.get(CONF_OFFSET, 0), ): cv.positive_int, vol.Optional( CONF_REAL_TIME, default=self.config_entry.options.get(CONF_REAL_TIME, True), ): bool, } ), errors=errors, )
py
1a49c06d5fbbcaa30f09040bfc55214fb473f387
from .hmm import HMMNumpy, HMMTensorflow
py
1a49c0c7dce870709f820de4cc5a1aa6954f737f
# Copyright 2019 The Shaderc Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the expect module.""" import expect from glslc_test_framework import TestStatus import re import unittest class TestStdoutMatchADotC(expect.StdoutMatch): expected_stdout = re.compile('a.c') class TestExpect(unittest.TestCase): def test_get_object_name(self): """Tests get_object_filename().""" source_and_object_names = [('a.vert', 'a.vert.spv'), ('b.frag', 'b.frag.spv'), ('c.tesc', 'c.tesc.spv'), ('d.tese', 'd.tese.spv'), ('e.geom', 'e.geom.spv'), ('f.comp', 'f.comp.spv'), ('file', 'file.spv'), ('file.', 'file.spv'), ('file.uk', 'file.spv'), ('file.vert.', 'file.vert.spv'), ('file.vert.bla', 'file.vert.spv')] actual_object_names = [ expect.get_object_filename(f[0]) for f in source_and_object_names ] expected_object_names = [f[1] for f in source_and_object_names] self.assertEqual(actual_object_names, expected_object_names) def test_stdout_match_regex_has_match(self): test = TestStdoutMatchADotC() status = TestStatus( test_manager=None, returncode=0, stdout=b'0abc1', stderr=None, directory=None, inputs=None, input_filenames=None) self.assertTrue(test.check_stdout_match(status)[0]) def test_stdout_match_regex_no_match(self): test = TestStdoutMatchADotC() status = TestStatus( test_manager=None, returncode=0, stdout=b'ab', stderr=None, directory=None, inputs=None, input_filenames=None) self.assertFalse(test.check_stdout_match(status)[0]) def test_stdout_match_regex_empty_stdout(self): test = TestStdoutMatchADotC() status = TestStatus( test_manager=None, returncode=0, stdout=b'', stderr=None, directory=None, inputs=None, input_filenames=None) self.assertFalse(test.check_stdout_match(status)[0])
bzl
1a49c2a3e6b9326a935e30d3a6151e85627e1019
load("@bazel_skylib//lib:shell.bzl", "shell") def _openapi_generate_go(ctx): generate = { "types": ctx.attr.types, "server": ctx.attr.server, "client": ctx.attr.client, "spec": ctx.attr.spec, } out_files = [] for k, v in generate.items(): if not v: continue out_file = ctx.actions.declare_file(k + ".gen.go") generate_kind = k if generate_kind == "server": generate_kind = "chi-server" cmd = "{bin} -package {package} -generate {generate} -o {out}".format( bin = ctx.executable._oapi_codegen.path, package = shell.quote(ctx.attr.package), generate = generate_kind, out = shell.quote(out_file.path), ) extra_inputs = [] if generate_kind == "types" and ctx.file.types_excludes != None: cmd = cmd + " -exclude-schemas $(cat {excludes_file})".format( excludes_file = shell.quote(ctx.file.types_excludes.path), ) extra_inputs.append(ctx.file.types_excludes) # Source files must be the last input to the command. cmd += " {src}".format( src = shell.quote(ctx.file.src.path), ) ctx.actions.run_shell( outputs = [out_file], inputs = [ctx.file.src] + extra_inputs, tools = [ctx.executable._oapi_codegen], command = cmd, mnemonic = "OpenAPIGoGen", use_default_shell_env = True, ) out_files.append(out_file) return [ DefaultInfo(files = depset(out_files)), ] openapi_generate_go = rule( implementation = _openapi_generate_go, doc = "This rule generate Go files from a given open API specification.", attrs = { "src": attr.label( doc = "The input specification file.", allow_single_file = [".yml"], ), "package": attr.string( doc = "The Go package the generated code should live in.", default = "api", ), "types": attr.bool( doc = "Whether the types file should be generated", default = True, ), "types_excludes": attr.label( doc = "The file containing the schema list to exclude during the types generation.", allow_single_file = True, ), "server": attr.bool( doc = "Whether the server code should be generated", default = True, ), "client": attr.bool( doc = "Whehter the client code should be generated", default = True, ), "spec": attr.bool( doc = "Whether the spec code should be generated", default = True, ), "_oapi_codegen": attr.label( doc = "The code generator binary.", default = "@com_github_deepmap_oapi_codegen//cmd/oapi-codegen:oapi-codegen", executable = True, cfg = "target", ), }, )
py
1a49c41ed3d8ccc224c0bbdce1329e66c8dedb12
# _*_ coding: utf-8 _*_
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
# See LICENSE.txt for details.

import sys

import nose

from vnc_me_tests import VNC_ME_TESTS_ROOT

if __name__ in ('__main__', 'vnc_me_tests.__main__'):
    argv = sys.argv[:]
    argv.append(VNC_ME_TESTS_ROOT)
    nose.run_exit(argv=argv)
py
1a49c45d3a913fc331e397e346d140fe308119ed
""" 3) Faça uma função que receba uma string como parâmetro e retorne quantas palavras há na string. """ sample_sentence = "Noivinha do Aristides." def count_words(sentence): return len(sentence.split(" ")) print("Amount of words: ", str(count_words(sample_sentence)))
py
1a49c5675537f0f6df4292f64d780d296e2ac1be
from fuzzysearch import find_near_matches

from . import x_execer, filepath, x_env
import subprocess


def f_search(keyword: str, paths: [str]):
    res = []
    for p in paths:
        if find_near_matches(keyword, p, max_l_dist=0) != []:  # hit
            res.append(p)
    return res


def interactive(l: [str]):
    if len(l) <= 0:
        return None
    str_list = repr("\n".join(l))
    tmpFile = x_env["ENHANCD_DIR"] / "tmp"
    filterCmd = filepath.split_filterlist(x_env["ENHANCD_FILTER"])
    if filterCmd is None:
        return None

    x_execer.eval(f'echo {str_list} | {filterCmd} > {str(tmpFile)}')

    file = open(tmpFile, "r")
    select = str(file.read())
    file.close()
    tmpFile.unlink()

    if select is None:
        return None
    return select.rstrip("\n")
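

# Illustration (added, not part of the original module): a minimal sketch of the
# matching rule f_search applies. With max_l_dist=0, find_near_matches reports only
# exact substring occurrences, so a path is kept iff it contains the keyword.
# The keyword and paths below are made-up examples; this assumes the module's own
# imports (fuzzysearch and the package-relative helpers) resolve.
def _demo_f_search():
    sample_paths = ["/home/user/src/enhancd", "/home/user/music", "/tmp/enhancd-cache"]
    hits = f_search("enhancd", sample_paths)
    print(hits)  # expected: ['/home/user/src/enhancd', '/tmp/enhancd-cache']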
py
1a49c605d691a178bfad47d562683d455fb8da7a
import random import argparse from functools import partial import numpy as np import paddle import paddle.distributed as dist from paddle.io import DataLoader, DistributedBatchSampler, BatchSampler from paddlenlp.data import Pad # yapf: disable def parse_args(): parser = argparse.ArgumentParser(__doc__) parser.add_argument('--model_name_or_path', type=str, default='unified_transformer-12L-cn-luge', help='The path or shortcut name of the pre-trained model.') parser.add_argument('--save_dir', type=str, default='./checkpoints', help='The directory where the checkpoints will be saved.') parser.add_argument('--output_path', type=str, default='./predict.txt', help='The file path where the infer result will be saved.') parser.add_argument('--logging_steps', type=int, default=100, help='Log every X updates steps.') parser.add_argument('--save_steps', type=int, default=1000, help='Save checkpoint every X updates steps.') parser.add_argument('--seed', type=int, default=2021, help='Random seed for initialization.') parser.add_argument('--batch_size', type=int, default=16, help='Batch size per GPU/CPU for training.') parser.add_argument('--lr', type=float, default=5e-5, help='The initial learning rate.') parser.add_argument('--weight_decay', type=float, default=0.01, help='The weight decay for optimizer.') parser.add_argument('--epochs', type=int, default=3, help='Total number of training epochs to perform.') parser.add_argument('--warmup_steps', type=int, default=2500, help='The number of warmup steps.') parser.add_argument('--max_grad_norm', type=float, default=0.1, help='The max value of grad norm.') parser.add_argument('--max_seq_len', type=int, default=512, help='The maximum sequence length of training.') parser.add_argument('--max_response_len', type=int, default=128, help='The maximum response sequence length of training.') parser.add_argument('--max_knowledge_len', type=int, default=256, help='The maximum knowledge sequence length of training.') parser.add_argument('--min_dec_len', type=int, default=1, help='The minimum sequence length of generation.') parser.add_argument('--max_dec_len', type=int, default=64, help='The maximum sequence length of generation.') parser.add_argument('--num_samples', type=int, default=1, help='The decode numbers in generation.') parser.add_argument('--decode_strategy', type=str, default='sampling', help='The decode strategy in generation.') parser.add_argument('--top_k', type=int, default=0, help='The number of highest probability vocabulary tokens to keep for top-k sampling.') parser.add_argument('--temperature', type=float, default=1.0, help='The value used to module the next token probabilities.') parser.add_argument('--top_p', type=float, default=1.0, help='The cumulative probability for top-p sampling.') parser.add_argument('--num_beams', type=int, default=0, help='The number of beams for beam search.') parser.add_argument('--length_penalty', type=float, default=1.0, help='The exponential penalty to the sequence length for beam search.') parser.add_argument('--early_stopping', type=eval, default=False, help='Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not.') parser.add_argument('--device', type=str, default='gpu', help='The device to select for training the model.') args = parser.parse_args() return args # yapf: enable def print_args(args): print('----------- Configuration Arguments -----------') for arg, value in sorted(vars(args).items()): print('%s: %s' % (arg, value)) 
print('------------------------------------------------') def set_seed(seed): # Use the same data seed(for data shuffle) for all procs to guarantee data # consistency after sharding. random.seed(seed) np.random.seed(seed) # Maybe different op seeds(for dropout) for different procs is better. paddle.seed(seed + dist.get_rank()) def preprocess_examples(examples, mode='train'): """ For training set and dev set, treat each utterance of the first speaker as the response, and concatenate the goal, knowledge and the dialog’s previous utterances as the history. In this way, multiple history-response pairs are constructed. """ if mode == 'test': return examples new_examples = [] for example in examples: conversation = example['conversation'] for i in range(0, len(conversation), 2): new_examples.append({ 'goal': example['goal'], 'knowledge': example['knowledge'], 'history': conversation[:i], 'response': conversation[i] }) return new_examples def convert_example(example, tokenizer, max_seq_len=512, max_response_len=128, max_knowledge_len=256, mode='train'): """Convert all examples into necessary features.""" goal = example['goal'] knowledge = example['knowledge'] goal_knowledge = ' '.join([' '.join(lst) for lst in goal + knowledge]) if mode != 'test': tokenized_example = tokenizer.dialogue_encode( example['history'], response=example['response'], knowledge=goal_knowledge, task_type='knowledge', max_seq_len=max_seq_len, max_response_len=max_response_len, max_knowledge_len=max_knowledge_len, return_length=True) response_start = tokenized_example['input_ids'].index( tokenizer.cls_token_id, 1) response_end = tokenized_example['seq_len'] # Use to gather the logits corresponding to the labels during training tokenized_example['masked_positions'] = list( range(response_start, response_end - 1)) tokenized_example['labels'] = tokenized_example['input_ids'][ response_start + 1:response_end] return tokenized_example else: tokenized_example = tokenizer.dialogue_encode( example['history'], knowledge=goal_knowledge, task_type='knowledge', max_seq_len=max_seq_len, max_knowledge_len=max_knowledge_len, add_start_token_as_response=True) if 'response' in example: tokenized_example['response'] = example['response'] return tokenized_example def batchify_fn(batch_examples, pad_val, mode): def pad_mask(batch_attention_mask): batch_size = len(batch_attention_mask) max_len = max(map(len, batch_attention_mask)) attention_mask = np.ones( (batch_size, max_len, max_len), dtype='float32') * -1e9 for i, mask_data in enumerate(attention_mask): seq_len = len(batch_attention_mask[i]) mask_data[-seq_len:, -seq_len:] = np.array( batch_attention_mask[i], dtype='float32') # In order to ensure the correct broadcasting mechanism, expand one # dimension to the second dimension (n_head of Transformer). 
attention_mask = np.expand_dims(attention_mask, axis=1) return attention_mask pad_func = Pad(pad_val=pad_val, pad_right=False) input_ids = pad_func([example['input_ids'] for example in batch_examples]) token_type_ids = pad_func( [example['token_type_ids'] for example in batch_examples]) position_ids = pad_func( [example['position_ids'] for example in batch_examples]) attention_mask = pad_mask( [example['attention_mask'] for example in batch_examples]) if mode != 'test': max_len = max([example['seq_len'] for example in batch_examples]) masked_positions = np.concatenate([ np.array(example['masked_positions']) + (max_len - example['seq_len']) + i * max_len for i, example in enumerate(batch_examples) ]) labels = np.concatenate( [np.array(example['labels']) for example in batch_examples]) return input_ids, token_type_ids, position_ids, attention_mask, masked_positions, labels else: return input_ids, token_type_ids, position_ids, attention_mask def create_data_loader(dataset, tokenizer, args, mode): trans_func1 = partial(preprocess_examples, mode=mode) trans_func2 = partial( convert_example, tokenizer=tokenizer, max_seq_len=args.max_seq_len, max_response_len=args.max_response_len, max_knowledge_len=args.max_knowledge_len, mode=mode) dataset = dataset.map(trans_func1, batched=True).map(trans_func2, lazy=True) if mode == 'train': batch_sampler = DistributedBatchSampler( dataset, batch_size=args.batch_size, shuffle=True) else: batch_sampler = BatchSampler( dataset, batch_size=args.batch_size, shuffle=False) collate_fn = partial(batchify_fn, pad_val=tokenizer.pad_token_id, mode=mode) data_loader = DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=collate_fn, return_list=True) return dataset, data_loader def post_process_response(token_ids, tokenizer): """Post-process the decoded sequence. Truncate from the first <eos>.""" eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return token_ids, tokens def get_in_turn_repetition(pred, is_cn=False): """Get in-turn repetition.""" if len(pred) == 0: return 1.0 if isinstance(pred[0], str): pred = [tok.lower() for tok in pred] if is_cn: pred = "".join(pred) tri_grams = set() for i in range(len(pred) - 2): tri_gram = tuple(pred[i:i + 3]) if tri_gram in tri_grams: return True tri_grams.add(tri_gram) return False def select_response(ids, scores, tokenizer, max_dec_len=None, num_samples=1): ids = ids.numpy().tolist() scores = scores.numpy() if len(ids) != len(scores) or (len(ids) % num_samples) != 0: raise ValueError( "the length of `ids` is {}, but the `num_samples` is {}".format( len(ids), num_samples)) group = [] tmp = [] for pred, score in zip(ids, scores): pred_token_ids, pred_tokens = post_process_response(pred, tokenizer) num_token = len(pred_token_ids) response = " ".join(pred_tokens) in_turn_repetition = get_in_turn_repetition( pred_tokens, True) or get_in_turn_repetition(pred_token_ids) # not ending if max_dec_len is not None and num_token >= max_dec_len: score -= 1e3 elif in_turn_repetition: score -= 1e3 tmp.append([response, score]) if len(tmp) == num_samples: group.append(tmp) tmp = [] results = [] for preds in group: preds = sorted(preds, key=lambda x: -x[1]) results.append(preds[0][0]) return results
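

# Illustration (added, not part of the original script): a quick sketch of the
# tri-gram repetition check that select_response() uses to penalize candidates.
# The token lists are made-up examples; string tokens are lower-cased before comparison.
def _demo_in_turn_repetition():
    repeated = ["I", "like", "it", "I", "like", "it"]
    clean = ["I", "like", "it", "very", "much"]
    print(get_in_turn_repetition(repeated))  # True: the tri-gram ("i", "like", "it") recurs
    print(get_in_turn_repetition(clean))     # False: no tri-gram appears twice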
py
1a49c7f34a5ff664991700bced3ebdd961fc4685
from modules.GameLevel import GameLevel
from modules.interfaces.gameEndIterface import gameEndIterface
from modules.interfaces.gameStartInterface import gameStartInterface
from modules.interfaces.switchLevelIterface import switchLevelIterface
py
1a49c87e4f3b26ee97e1bcfedca9498324a64457
#!/usr/bin/python # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example creates a flash display creative. Requires a flash asset, backup image asset, and an advertiser ID as input. To get an advertiser ID, run get_advertisers.py. """ import argparse import sys from apiclient.http import MediaFileUpload from oauth2client import client import dfareporting_utils # Declare command-line flags. argparser = argparse.ArgumentParser(add_help=False) argparser.add_argument( 'profile_id', type=int, help='The ID of the profile to add a user role for') argparser.add_argument( 'advertiser_id', type=int, help='The ID of the advertiser to associate this creative with.') argparser.add_argument( 'size_id', type=int, help='The ID of the size of this creative.') argparser.add_argument( 'flash_asset_name', help='Suggested name to use for the uploaded creative asset.') argparser.add_argument( 'path_to_flash_asset_file', help='Path to the asset file to be uploaded.') argparser.add_argument( 'backup_image_name', help='Suggested name to use for the uploaded creative asset.') argparser.add_argument( 'path_to_backup_image_file', help='Path to the asset file to be uploaded.') def main(argv): # Retrieve command line arguments. flags = dfareporting_utils.get_arguments(argv, __doc__, parents=[argparser]) # Authenticate and construct service. service = dfareporting_utils.setup(flags) profile_id = flags.profile_id advertiser_id = flags.advertiser_id backup_image_name = flags.backup_image_name flash_asset_name = flags.flash_asset_name path_to_backup_image_file = flags.path_to_backup_image_file path_to_flash_asset_file = flags.path_to_flash_asset_file size_id = flags.size_id try: # Upload the flash asset flash_asset_id = upload_creative_asset( service, profile_id, advertiser_id, flash_asset_name, path_to_flash_asset_file, 'FLASH') # Upload the backup image asset backup_image_asset_id = upload_creative_asset( service, profile_id, advertiser_id, backup_image_name, path_to_backup_image_file, 'HTML_IMAGE') # Construct the creative structure. creative = { 'advertiserId': advertiser_id, 'backupImageClickThroughUrl': 'https://www.google.com', 'backupImageReportingLabel': 'backup_image_exit', 'backupImageTargetWindow': {'targetWindowOption': 'NEW_WINDOW'}, 'clickTags': [{ 'eventName': 'exit', 'name': 'click_tag', 'value': 'https://www.google.com' }], 'creativeAssets': [ {'assetIdentifier': flash_asset_id, 'role': 'PRIMARY', 'windowMode': 'TRANSPARENT'}, {'assetIdentifier': backup_image_asset_id, 'role': 'BACKUP_IMAGE'}, ], 'name': 'Test flash display creative', 'size': {'id': size_id}, 'type': 'ENHANCED_BANNER' } request = service.creatives().insert(profileId=profile_id, body=creative) # Execute request and print response. response = request.execute() print ('Created flash display creative with ID %s and name "%s".' 
% (response['id'], response['name'])) except client.AccessTokenRefreshError: print ('The credentials have been revoked or expired, please re-run the ' 'application to re-authorize') def upload_creative_asset( service, profile_id, advertiser_id, asset_name, path_to_asset_file, asset_type): """Uploads a creative asset and returns an assetIdentifier.""" # Construct the creative asset metadata creative_asset = { 'assetIdentifier': { 'name': asset_name, 'type': asset_type } } media = MediaFileUpload(path_to_asset_file) if not media.mimetype(): media = MediaFileUpload(path_to_asset_file, 'application/octet-stream') response = service.creativeAssets().insert( advertiserId=advertiser_id, profileId=profile_id, media_body=media, body=creative_asset).execute() return response['assetIdentifier'] if __name__ == '__main__': main(sys.argv)
py
1a49c8b879fb332a1efbb27e8548589d30835428
from datetime import datetime, timezone
from itertools import cycle

from .lame import LAME
from .mt import MT


def filetime_to_dt(timestamp: int) -> datetime:
    return datetime.fromtimestamp(timestamp // 100000000, timezone.utc)


def bytes_to_bitstring(data: bytes) -> str:
    return "".join(bin(x)[2:].zfill(8) for x in data)


class BitStream:
    def __init__(self, data: bytes) -> None:
        self.data = bytes_to_bitstring(data)

    def get_bits(self, num: int) -> int:
        out = int(self.data[:num], 2)
        self.data = self.data[num:]
        return out


def xor(data: bytes, key: bytes) -> bytes:
    return bytes(a ^ b for a, b in zip(data, cycle(key)))


def decrypt_lame(data: bytes, seed: int) -> bytes:
    lame = LAME()
    lame.srand(seed)
    return bytes([x ^ lame.get_next() for x in data])


def decrypt_mt(data: bytes, seed: int) -> bytes:
    key = MT(seed).get_bytes(len(data))
    return xor(data, key)


def crc_data(data: bytes) -> int:
    if len(data) == 0:
        return 0

    dwKey_ECX = 0
    dwKey_ESI = 1
    for b in data:
        dwKey_ESI = (b + dwKey_ESI) % 0xFFF1
        dwKey_ECX = (dwKey_ECX + dwKey_ESI) % 0xFFF1

    return (dwKey_ECX << 0x10) + dwKey_ESI
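

# Illustration (added, not part of the original module): a small sketch of the two
# pure helpers above, assuming the module's own relative imports resolve. xor() with
# a repeating key is its own inverse, and crc_data() follows the Adler-32 recipe
# (two running sums modulo 0xFFF1 == 65521, combined as (high << 16) + low), so
# zlib.adler32 should agree with it on non-empty input.
def _demo_helpers():
    import zlib

    plaintext = b"example payload"
    key = b"\x13\x37"
    ciphertext = xor(plaintext, key)
    assert xor(ciphertext, key) == plaintext  # applying the same keystream twice restores the data

    assert crc_data(plaintext) == zlib.adler32(plaintext)
    print("xor round-trip and Adler-32 comparison passed")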
py
1a49c9da946145ce54c0cda56cf8139fef8672f8
# Copyright 2012 Red Hat, Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ gettext for openstack-common modules. Usual usage in an openstack.common module: from heatclient.openstack.common.gettextutils import _ """ import copy import functools import gettext import locale from logging import handlers import os import re from babel import localedata import six _localedir = os.environ.get('heatclient'.upper() + '_LOCALEDIR') _t = gettext.translation('heatclient', localedir=_localedir, fallback=True) # We use separate translation catalogs for each log level, so set up a # mapping between the log level name and the translator. The domain # for the log level is project_name + "-log-" + log_level so messages # for each level end up in their own catalog. _t_log_levels = dict( (level, gettext.translation('heatclient' + '-log-' + level, localedir=_localedir, fallback=True)) for level in ['info', 'warning', 'error', 'critical'] ) _AVAILABLE_LANGUAGES = {} USE_LAZY = False def enable_lazy(): """Convenience function for configuring _() to use lazy gettext Call this at the start of execution to enable the gettextutils._ function to use lazy gettext functionality. This is useful if your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. """ global USE_LAZY USE_LAZY = True def _(msg): if USE_LAZY: return Message(msg, domain='heatclient') else: if six.PY3: return _t.gettext(msg) return _t.ugettext(msg) def _log_translation(msg, level): """Build a single translation of a log message """ if USE_LAZY: return Message(msg, domain='heatclient' + '-log-' + level) else: translator = _t_log_levels[level] if six.PY3: return translator.gettext(msg) return translator.ugettext(msg) # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = functools.partial(_log_translation, level='info') _LW = functools.partial(_log_translation, level='warning') _LE = functools.partial(_log_translation, level='error') _LC = functools.partial(_log_translation, level='critical') def install(domain, lazy=False): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). :param domain: the translation domain :param lazy: indicates whether or not to install the lazy _() function. The lazy _() introduces a way to do deferred translation of messages by installing a _ that builds Message objects, instead of strings, which can then be lazily translated into any available locale. """ if lazy: # NOTE(mrodden): Lazy gettext functionality. # # The following introduces a deferred way to do translations on # messages in OpenStack. 
We override the standard _() function # and % (format string) operation to build Message objects that can # later be translated when we have more information. def _lazy_gettext(msg): """Create and return a Message object. Lazy gettext function for a given domain, it is a factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.) Message encapsulates a string so that we can translate it later when needed. """ return Message(msg, domain=domain) from six import moves moves.builtins.__dict__['_'] = _lazy_gettext else: localedir = '%s_LOCALEDIR' % domain.upper() if six.PY3: gettext.install(domain, localedir=os.environ.get(localedir)) else: gettext.install(domain, localedir=os.environ.get(localedir), unicode=True) class Message(six.text_type): """A Message object is a unicode object that can be translated. Translation of Message is done explicitly using the translate() method. For all non-translation intents and purposes, a Message is simply unicode, and can be treated as such. """ def __new__(cls, msgid, msgtext=None, params=None, domain='heatclient', *args): """Create a new Message object. In order for translation to work gettext requires a message ID, this msgid will be used as the base unicode text. It is also possible for the msgid and the base unicode text to be different by passing the msgtext parameter. """ # If the base msgtext is not given, we use the default translation # of the msgid (which is in English) just in case the system locale is # not English, so that the base text will be in that locale by default. if not msgtext: msgtext = Message._translate_msgid(msgid, domain) # We want to initialize the parent unicode with the actual object that # would have been plain unicode if 'Message' was not enabled. msg = super(Message, cls).__new__(cls, msgtext) msg.msgid = msgid msg.domain = domain msg.params = params return msg def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message @staticmethod def _translate_msgid(msgid, domain, desired_locale=None): if not desired_locale: system_locale = locale.getdefaultlocale() # If the system locale is not available to the runtime use English if not system_locale[0]: desired_locale = 'en_US' else: desired_locale = system_locale[0] locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') lang = gettext.translation(domain, localedir=locale_dir, languages=[desired_locale], fallback=True) if six.PY3: translator = lang.gettext else: translator = lang.ugettext translated_message = translator(msgid) return translated_message def __mod__(self, other): # When we mod a Message we want the actual operation to be performed # by the parent class (i.e. 
unicode()), the only thing we do here is # save the original msgid and the parameters in case of a translation params = self._sanitize_mod_params(other) unicode_mod = super(Message, self).__mod__(params) modded = Message(self.msgid, msgtext=unicode_mod, params=params, domain=self.domain) return modded def _sanitize_mod_params(self, other): """Sanitize the object being modded with this Message. - Add support for modding 'None' so translation supports it - Trim the modded object, which can be a large dictionary, to only those keys that would actually be used in a translation - Snapshot the object being modded, in case the message is translated, it will be used as it was when the Message was created """ if other is None: params = (other,) elif isinstance(other, dict): params = self._trim_dictionary_parameters(other) else: params = self._copy_param(other) return params def _trim_dictionary_parameters(self, dict_param): """Return a dict that only has matching entries in the msgid.""" # NOTE(luisg): Here we trim down the dictionary passed as parameters # to avoid carrying a lot of unnecessary weight around in the message # object, for example if someone passes in Message() % locals() but # only some params are used, and additionally we prevent errors for # non-deepcopyable objects by unicoding() them. # Look for %(param) keys in msgid; # Skip %% and deal with the case where % is first character on the line keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) # If we don't find any %(param) keys but have a %s if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): # Apparently the full dictionary is the parameter params = self._copy_param(dict_param) else: params = {} # Save our existing parameters as defaults to protect # ourselves from losing values if we are called through an # (erroneous) chain that builds a valid Message with # arguments, and then does something like "msg % kwds" # where kwds is an empty dictionary. src = {} if isinstance(self.params, dict): src.update(self.params) src.update(dict_param) for key in keys: params[key] = self._copy_param(src[key]) return params def _copy_param(self, param): try: return copy.deepcopy(param) except TypeError: # Fallback to casting to unicode this will handle the # python code-like objects that can't be deep-copied return six.text_type(param) def __add__(self, other): msg = _('Message objects do not support addition.') raise TypeError(msg) def __radd__(self, other): return self.__add__(other) def __str__(self): # NOTE(luisg): Logging in python 2.6 tries to str() log records, # and it expects specifically a UnicodeError in order to proceed. msg = _('Message objects do not support str() because they may ' 'contain non-ascii characters. ' 'Please use unicode() or translate() instead.') raise UnicodeError(msg) def get_available_languages(domain): """Lists the available languages for the given translation domain. :param domain: the domain to get languages for """ if domain in _AVAILABLE_LANGUAGES: return copy.copy(_AVAILABLE_LANGUAGES[domain]) localedir = '%s_LOCALEDIR' % domain.upper() find = lambda x: gettext.find(domain, localedir=os.environ.get(localedir), languages=[x]) # NOTE(mrodden): en_US should always be available (and first in case # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was # renamed to locale_identifiers() in >=1.0, the requirements master list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove # this check when the master list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() for i in locale_identifiers: if find(i) is not None: language_list.append(i) # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they # are perfectly legitimate locales: # https://github.com/mitsuhiko/babel/issues/37 # In Babel 1.3 they fixed the bug and they support these locales, but # they are still not explicitly "listed" by locale_identifiers(). # That is why we add the locales here explicitly if necessary so that # they are listed as supported. aliases = {'zh': 'zh_CN', 'zh_Hant_HK': 'zh_HK', 'zh_Hant': 'zh_TW', 'fil': 'tl_PH'} for (locale, alias) in six.iteritems(aliases): if locale in language_list and alias not in language_list: language_list.append(alias) _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list) def translate(obj, desired_locale=None): """Gets the translated unicode representation of the given object. If the object is not translatable it is returned as-is. If the locale is None the object is translated to the system locale. :param obj: the object to translate :param desired_locale: the locale to translate the message to, if None the default system locale will be used :returns: the translated object in unicode, or the original object if it could not be translated """ message = obj if not isinstance(message, Message): # If the object to translate is not already translatable, # let's first get its unicode representation message = six.text_type(obj) if isinstance(message, Message): # Even after unicoding() we still need to check if we are # running with translatable unicode before translating return message.translate(desired_locale) return obj def _translate_args(args, desired_locale=None): """Translates all the translatable elements of the given arguments object. This method is used for translating the translatable values in method arguments which include values of tuples or dictionaries. If the object is not a tuple or a dictionary the object itself is translated if it is translatable. If the locale is None the object is translated to the system locale. :param args: the args to translate :param desired_locale: the locale to translate the args to, if None the default system locale will be used :returns: a new args object with the translated contents of the original """ if isinstance(args, tuple): return tuple(translate(v, desired_locale) for v in args) if isinstance(args, dict): translated_dict = {} for (k, v) in six.iteritems(args): translated_v = translate(v, desired_locale) translated_dict[k] = translated_v return translated_dict return translate(args, desired_locale) class TranslationHandler(handlers.MemoryHandler): """Handler that translates records before logging them. The TranslationHandler takes a locale and a target logging.Handler object to forward LogRecord objects to after translating them. This handler depends on Message objects being logged, instead of regular strings. 
The handler can be configured declaratively in the logging.conf as follows: [handlers] keys = translatedlog, translator [handler_translatedlog] class = handlers.WatchedFileHandler args = ('/var/log/api-localized.log',) formatter = context [handler_translator] class = openstack.common.log.TranslationHandler target = translatedlog args = ('zh_CN',) If the specified locale is not available in the system, the handler will log in the default locale. """ def __init__(self, locale=None, target=None): """Initialize a TranslationHandler :param locale: locale to use for translating messages :param target: logging.Handler object to forward LogRecord objects to after translation """ # NOTE(luisg): In order to allow this handler to be a wrapper for # other handlers, such as a FileHandler, and still be able to # configure it using logging.conf, this handler has to extend # MemoryHandler because only the MemoryHandlers' logging.conf # parsing is implemented such that it accepts a target handler. handlers.MemoryHandler.__init__(self, capacity=0, target=target) self.locale = locale def setFormatter(self, fmt): self.target.setFormatter(fmt) def emit(self, record): # We save the message from the original record to restore it # after translation, so other handlers are not affected by this original_msg = record.msg original_args = record.args try: self._translate_and_log_record(record) finally: record.msg = original_msg record.args = original_args def _translate_and_log_record(self, record): record.msg = translate(record.msg, self.locale) # In addition to translating the message, we also need to translate # arguments that were passed to the log method that were not part # of the main message e.g., log.info(_('Some message %s'), this_one)) record.args = _translate_args(record.args, self.locale) self.target.emit(record)
py
1a49ca5bfa6e0089afef8c892dec50ac7716496f
#!/usr/bin/env python import sys import os.path from os.path import join as PJ import re import json import numpy as np from tqdm import tqdm import igraph as ig import louvain import math import jgf import graph_tool as gt; import graph_tool.inference as gtInference; # import infomap def isFloat(value): if(value is None): return False try: numericValue = float(value) return np.isfinite(numericValue) except ValueError: return False class NumpyEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64)): ret = int(obj) elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)): ret = float(obj) elif isinstance(obj, (np.ndarray,)): ret = obj.tolist() else: ret = json.JSONEncoder.default(self, obj) if isinstance(ret, (float)): if math.isnan(ret): ret = None if isinstance(ret, (bytes, bytearray)): ret = ret.decode("utf-8") return ret results = {"errors": [], "warnings": [], "brainlife": [], "datatype_tags": [], "tags": []} def warning(msg): global results results['warnings'].append(msg) #results['brainlife'].append({"type": "warning", "msg": msg}) print(msg) def error(msg): global results results['errors'].append(msg) #results['brainlife'].append({"type": "error", "msg": msg}) print(msg) def exitApp(): global results with open("product.json", "w") as fp: json.dump(results, fp, cls=NumpyEncoder) if len(results["errors"]) > 0: sys.exit(1) else: sys.exit() def exitAppWithError(msg): global results results['errors'].append(msg) #results['brainlife'].append({"type": "error", "msg": msg}) print(msg) exitApp() def louvain_find_partition_multiplex(graphs, partition_type,layer_weights=None, seed=None, **kwargs): """ Detect communities for multiplex graphs. Each graph should be defined on the same set of vertices, only the edges may differ for different graphs. See :func:`Optimiser.optimise_partition_multiplex` for a more detailed explanation. Parameters ---------- graphs : list of :class:`ig.Graph` List of :class:`louvain.VertexPartition` layers to optimise. partition_type : type of :class:`MutableVertexPartition` The type of partition to use for optimisation (identical for all graphs). seed : int Seed for the random number generator. By default uses a random seed if nothing is specified. **kwargs Remaining keyword arguments, passed on to constructor of ``partition_type``. Returns ------- list of int membership of nodes. float Improvement in quality of combined partitions, see :func:`Optimiser.optimise_partition_multiplex`. Notes ----- We don't return a partition in this case because a partition is always defined on a single graph. We therefore simply return the membership (which is the same for all layers). See Also -------- :func:`Optimiser.optimise_partition_multiplex` :func:`slices_to_layers` Examples -------- >>> n = 100 >>> G_1 = ig.Graph.Lattice([n], 1) >>> G_2 = ig.Graph.Lattice([n], 1) >>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2], ... 
louvain.ModularityVertexPartition) """ n_layers = len(graphs) partitions = [] if(layer_weights is None): layer_weights = [1]*n_layers for graph in graphs: partitions.append(partition_type(graph, **kwargs)) optimiser = louvain.Optimiser() if (not seed is None): optimiser.set_rng_seed(seed) improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights) return partitions[0].membership, improvement def SBMMinimizeMembership(graphs,layerWeights=None, weightMode = "real-normal"): layered = (len(graphs)>1) graph = graphs[0] vertexCount = graph.vcount() g = gt.Graph(directed=graph.is_directed()) for _ in range(vertexCount): g.add_vertex() weighted = "weights" in graph.edge_attributes() if(weighted): weightsProperty = g.new_edge_property("double") if(layered): layerProperty = g.new_edge_property("int32_t") for graphIndex, graph in enumerate(graphs): if(weighted): weightMultiplier = 1 if(layered and layerWeights is not None): weightMultiplier = layerWeights[graphIndex] for edge in graph.es: gedge = g.add_edge(edge.source,edge.target) if(weighted): weight = weightMultiplier*edge["weight"] weightsProperty[gedge] = weight if(layered): layerProperty[gedge] = graphIndex if(weighted): g.edge_properties["weight"] = weightsProperty if(layered): g.edge_properties["layer"] = layerProperty state_args = {} if(weighted): state_args["recs"] = [g.ep.weight] state_args["rec_types"] = [weightMode] if(layered): state_args["ec"] = g.ep.layer state_args["layers"] = True # print(state_args) state = gtInference.minimize.minimize_blockmodel_dl(g,deg_corr=True, layers=layered, state_args=state_args); return list(state.get_blocks()) # def infomapMembership(g, parameters): # vertexCount = g.vcount() # infomapSimple = infomap.Infomap(parameters) # infomapSimple.setVerbosity(0) # infoNetwork = infomapSimple.network() # for nodeIndex in range(0,vertexCount): # infoNetwork.addNode(nodeIndex) # weighted = "weights" in g.edge_attributes() # for edge in edges: # weight = 1.0 # if(weighted): # weight = edge["weight"]; # infoNetwork.addLink(edge.source, edge.target) # infomapSimple.run() # membership = [0]*vertexCount # # print("Result") # # print("\n#node module") # for node in infomapSimple.iterTree(): # if node.isLeaf(): # # print((node.physicalId,node.moduleIndex())); # membership[node.physicalId] = node.moduleIndex(); # return membership; configFilename = "config.json" argCount = len(sys.argv) if(argCount > 1): configFilename = sys.argv[1] outputDirectory = "output" outputFile = PJ(outputDirectory,"network.json.gz") if(not os.path.exists(outputDirectory)): os.makedirs(outputDirectory) with open(configFilename, "r") as fd: config = json.load(fd) communiMethod = "louvain" infomap_trials = 10 louvain_resolution = 1.0 louvain_quality_function = "modularity" assymetricNegativeWeights = True if("method" in config): communiMethod = config["method"].lower() if("louvain-quality-function" in config and config["louvain-quality-function"]): louvain_quality_function = config["louvain-quality-function"].lower() if("louvain-resolution" in config and isFloat(config["louvain-resolution"])): louvain_resolution = float(config["louvain-resolution"]) if("infomap-trials" in config and config["infomap-trials"]): infomap_trials = int(config["infomap-trials"]) if("assymetric-negative" in config): assymetricNegativeWeights = config["assymetric-negative"] networks = jgf.igraph.load(config["network"], compressed=True) outputNetworks = [] for network in tqdm(networks): weighted = "weight" in network.edge_attributes() layered = False 
if(weighted): signed = np.any(np.array(network.es["weight"])<0) if(signed): network_pos = network.subgraph_edges(network.es.select(weight_gt = 0), delete_vertices=False) network_neg = network.subgraph_edges(network.es.select(weight_lt = 0), delete_vertices=False) network_neg.es['weight'] = [-w for w in network_neg.es['weight']] layerNetworks = [network_pos,network_neg] layerWeights = [1,-1] layerNames = ["positive","negative"] layered=True if("layer" in network.edge_attributes()): if("edge-layer-weights" in network.attributes()): layerNames = list(network["edge-layer-weights"].keys()) layerWeights = list(network["edge-layer-weights"].values()) else: layerNames = list(set(network.es["layer"])) layerWeights = [1]*len(layerNames) layerNetworks = [] for layerIndex,layerName in enumerate(layerNames): layerNetwork = network.subgraph_edges(network.es.select(layer_eq = layerName), delete_vertices=False) layerNetworks.append(layerNetwork) layered = True if(communiMethod=="louvain"): # optimiser = louvain.Optimiser() # diff = optimiser.optimise_partition_multiplex( # [part_pos, part_neg] hasResolution = False if(layered): modularityWeights = layerWeights partitionFunction = louvain.ModularityVertexPartition if(louvain_quality_function=="modularity"): partitionFunction = louvain.ModularityVertexPartition if(layered and assymetricNegativeWeights): layerSizes = [g.ecount() for g in layerNetworks] allCount = np.sum(layerSizes) modularityWeights = [layerWeights[layerIndex]*layerSizes[layerIndex]/allCount for layerIndex in range(len(layerWeights))] modularityWeights[0] = 1.0 elif(louvain_quality_function=="rbconfiguration"): partitionFunction = louvain.RBConfigurationVertexPartition hasResolution = True if(layered and assymetricNegativeWeights): layerSizes = [g.ecount() for g in layerNetworks] allCount = np.sum(layerSizes) modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))] modularityWeights[0] = 1.0/layerWeights[0] elif(louvain_quality_function=="rber"): partitionFunction = louvain.RBERVertexPartition hasResolution = True if(layered and assymetricNegativeWeights): layerSizes = [g.ecount() for g in layerNetworks] allCount = np.sum(layerSizes) modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))] modularityWeights[0] = 1.0/layerWeights[0] elif(louvain_quality_function=="cpm"): partitionFunction = louvain.CPMVertexPartition hasResolution = True if(layered and assymetricNegativeWeights): layerSizes = [g.ecount() for g in layerNetworks] allCount = np.sum(layerSizes) modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))] modularityWeights[0] = 1.0/layerWeights[0] elif(louvain_quality_function=="significance"): partitionFunction = louvain.SignificanceVertexPartition hasResolution = False if(weighted): exitAppWithError("Significance quality does not work for weighted networks") elif(louvain_quality_function=="surprise"): partitionFunction = louvain.SurpriseVertexPartition hasResolution = False if(layered and assymetricNegativeWeights): layerSizes = [g.ecount() for g in layerNetworks] allCount = np.sum(layerSizes) modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))] modularityWeights[0] = 1.0/layerWeights[0] else: exitAppWithError("Invalid louvain method.") if(layered): if(hasResolution): membership, improv = louvain_find_partition_multiplex(layerNetworks,partitionFunction, 
layer_weights=modularityWeights,resolution_parameter=louvain_resolution,weights="weight") else: membership, improv = louvain_find_partition_multiplex(layerNetworks,partitionFunction, layer_weights=modularityWeights,weights="weight") else: if(hasResolution): membership = louvain.find_partition(network,partitionFunction, weights="weight",resolution_parameter=louvain_resolution).membership else: membership = louvain.find_partition(network,partitionFunction, weights="weight").membership elif(communiMethod=="infomap"): if(signed): exitAppWithError("Infomap does not work for negative weights.") else: membership = network.community_infomap(edge_weights="weight",trials=infomap_trials).membership elif(communiMethod=="sbm"): if(layered): membership = SBMMinimizeMembership(layerNetworks,layerWeights = layerWeights) else: membership = SBMMinimizeMembership([network]) else: exitAppWithError("Invalid community detection method (%s)."%communiMethod) network.vs["Community"] = membership outputNetworks.append(network) jgf.igraph.save(outputNetworks, outputFile, compressed=True) exitApp()
py
1a49ca7073ed0e503d21b17f233125abe8df5c58
#!/usr/bin/env python # # Copyright (c) 2009-2013, Luke Maurits <[email protected]> # All rights reserved. # With contributions from: # * Chris Clark # * Klein Stephane # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. __version__ = '0.7.2' import copy import csv import random import re import sys import textwrap import unicodedata # from ax.utils.six.moves import zip, map py3k = sys.version_info[0] >= 3 if py3k: str = str str = str uni_chr = chr from html.parser import HTMLParser else: uni_chr = chr from html.parser import HTMLParser if py3k and sys.version_info[1] >= 2: from html import escape else: from cgi import escape # hrule styles FRAME = 0 ALL = 1 NONE = 2 HEADER = 3 # Table styles DEFAULT = 10 MSWORD_FRIENDLY = 11 PLAIN_COLUMNS = 12 RANDOM = 20 _re = re.compile('\\033\\[[0-9;]*m') def _get_size(text): lines = text.split('\n') height = len(lines) width = max([_str_block_width(line) for line in lines]) return (width, height) class PrettyTable(object): def __init__(self, field_names=None, **kwargs): """Return a new PrettyTable instance Arguments: encoding - Unicode encoding scheme used to decode any encoded input field_names - list or tuple of field names fields - list or tuple of field names to include in displays start - index of first data row to include in output end - index of last data row to include in output PLUS ONE (list slice style) header - print a header showing field names (True or False) header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None) border - print a border around the table (True or False) hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE int_format - controls formatting of integer data float_format - controls formatting of floating point data padding_width - number of spaces on either side of column data (only used if left and right paddings are None) left_padding_width - number of spaces on left hand side of column data right_padding_width - number of spaces on right hand side of column data vertical_char - single character string used to draw vertical lines horizontal_char - single character string used to draw horizontal lines junction_char - single character string used to draw line junctions sortby - name of field to sort rows by sort_key - sorting key function, applied to data points before sorting valign - default valign for each row (None, "t", "m" or "b") reversesort - True or False to sort in descending or ascending order""" self.encoding = kwargs.get('encoding', 'UTF-8') # Data self._field_names = [] self._align = {} self._valign = {} self._max_width = {} self._rows = [] if field_names: self.field_names = field_names else: self._widths = [] # Options self._options = 'start end fields header border sortby reversesort sort_key attributes format hrules vrules'.split() self._options.extend( 'int_format float_format padding_width left_padding_width right_padding_width'.split() ) self._options.extend( 'vertical_char horizontal_char junction_char header_style valign xhtml print_empty'.split() ) for option in self._options: if option in kwargs: self._validate_option(option, kwargs[option]) else: kwargs[option] = None self._start = kwargs['start'] or 0 self._end = kwargs['end'] or None self._fields = kwargs['fields'] or None if kwargs['header'] in (True, False): self._header = kwargs['header'] else: self._header = True self._header_style = kwargs['header_style'] or None if kwargs['border'] in (True, False): self._border = kwargs['border'] else: self._border = True self._hrules = kwargs['hrules'] or FRAME self._vrules = kwargs['vrules'] or ALL self._sortby = kwargs['sortby'] or None if kwargs['reversesort'] in (True, False): self._reversesort = kwargs['reversesort'] else: self._reversesort = False self._sort_key = kwargs['sort_key'] or (lambda x: x) self._int_format = kwargs['int_format'] or {} self._float_format = kwargs['float_format'] or {} self._padding_width = kwargs['padding_width'] or 1 self._left_padding_width = kwargs['left_padding_width'] or None self._right_padding_width = kwargs['right_padding_width'] or None self._vertical_char = kwargs['vertical_char'] or self._unicode('|') self._horizontal_char = kwargs['horizontal_char'] or self._unicode('-') self._junction_char = kwargs['junction_char'] or self._unicode('+') if kwargs['print_empty'] in (True, False): self._print_empty = kwargs['print_empty'] else: self._print_empty = True self._format = kwargs['format'] or False self._xhtml = kwargs['xhtml'] or False self._attributes = kwargs['attributes'] or {} def _unicode(self, value): if not isinstance(value, str): value = str(value) if not isinstance(value, str): value = str(value, self.encoding, 'strict') return value def _justify(self, text, width, align): excess = width - _str_block_width(text) if align == 'l': return text + excess * ' ' elif align == 'r': return excess * ' ' + text else: if excess % 2: # Uneven padding # Put more space on right if text is of odd length... 
if _str_block_width(text) % 2: return (excess // 2) * ' ' + text + (excess // 2 + 1) * ' ' # and more space on left if text is of even length else: return (excess // 2 + 1) * ' ' + text + (excess // 2) * ' ' # Why distribute extra space this way? To match the behaviour of # the inbuilt str.center() method. else: # Equal padding on either side return (excess // 2) * ' ' + text + (excess // 2) * ' ' def __getattr__(self, name): if name == 'rowcount': return len(self._rows) elif name == 'colcount': if self._field_names: return len(self._field_names) elif self._rows: return len(self._rows[0]) else: return 0 else: raise AttributeError(name) def __getitem__(self, index): new = PrettyTable() new.field_names = self.field_names for attr in self._options: setattr(new, '_' + attr, getattr(self, '_' + attr)) setattr(new, '_align', getattr(self, '_align')) if isinstance(index, slice): for row in self._rows[index]: new.add_row(row) elif isinstance(index, int): new.add_row(self._rows[index]) else: raise Exception( 'Index %s is invalid, must be an integer or slice' % str(index) ) return new if py3k: def __str__(self): return self.__unicode__() else: def __str__(self): return self.__unicode__().encode(self.encoding) def __unicode__(self): return self.get_string() ############################## # ATTRIBUTE VALIDATORS # ############################## # The method _validate_option is all that should be used elsewhere in the code base to validate options. # It will call the appropriate validation method for that option. The individual validation methods should # never need to be called directly (although nothing bad will happen if they *are*). # Validation happens in TWO places. # Firstly, in the property setters defined in the ATTRIBUTE MANAGMENT section. # Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings def _validate_option(self, option, val): if option in ('field_names'): self._validate_field_names(val) elif option in ( 'start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format', ): self._validate_nonnegative_int(option, val) elif option in ('sortby'): self._validate_field_name(option, val) elif option in ('sort_key'): self._validate_function(option, val) elif option in ('hrules'): self._validate_hrules(option, val) elif option in ('vrules'): self._validate_vrules(option, val) elif option in ('fields'): self._validate_all_field_names(option, val) elif option in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'): self._validate_true_or_false(option, val) elif option in ('header_style'): self._validate_header_style(val) elif option in ('int_format'): self._validate_int_format(option, val) elif option in ('float_format'): self._validate_float_format(option, val) elif option in ('vertical_char', 'horizontal_char', 'junction_char'): self._validate_single_char(option, val) elif option in ('attributes'): self._validate_attributes(option, val) else: raise Exception('Unrecognised option: %s!' 
% option) def _validate_field_names(self, val): # Check for appropriate length if self._field_names: try: assert len(val) == len(self._field_names) except AssertionError: raise Exception( 'Field name list has incorrect number of values, (actual) %d!=%d (expected)' % (len(val), len(self._field_names)) ) if self._rows: try: assert len(val) == len(self._rows[0]) except AssertionError: raise Exception( 'Field name list has incorrect number of values, (actual) %d!=%d (expected)' % (len(val), len(self._rows[0])) ) # Check for uniqueness try: assert len(val) == len(set(val)) except AssertionError: raise Exception('Field names must be unique!') def _validate_header_style(self, val): try: assert val in ('cap', 'title', 'upper', 'lower', None) except AssertionError: raise Exception( 'Invalid header style, use cap, title, upper, lower or None!' ) def _validate_align(self, val): try: assert val in ['l', 'c', 'r'] except AssertionError: raise Exception('Alignment %s is invalid, use l, c or r!' % val) def _validate_valign(self, val): try: assert val in ['t', 'm', 'b', None] except AssertionError: raise Exception('Alignment %s is invalid, use t, m, b or None!' % val) def _validate_nonnegative_int(self, name, val): try: assert int(val) >= 0 except AssertionError: raise Exception('Invalid value for %s: %s!' % (name, self._unicode(val))) def _validate_true_or_false(self, name, val): try: assert val in (True, False) except AssertionError: raise Exception('Invalid value for %s! Must be True or False.' % name) def _validate_int_format(self, name, val): if val == '': return try: assert type(val) in (str, str) assert val.isdigit() except AssertionError: raise Exception( 'Invalid value for %s! Must be an integer format string.' % name ) def _validate_float_format(self, name, val): if val == '': return try: assert type(val) in (str, str) assert '.' in val bits = val.split('.') assert len(bits) <= 2 assert bits[0] == '' or bits[0].isdigit() assert bits[1] == '' or bits[1].isdigit() except AssertionError: raise Exception( 'Invalid value for %s! Must be a float format string.' % name ) def _validate_function(self, name, val): try: assert hasattr(val, '__call__') except AssertionError: raise Exception('Invalid value for %s! Must be a function.' % name) def _validate_hrules(self, name, val): try: assert val in (ALL, FRAME, HEADER, NONE) except AssertionError: raise Exception( 'Invalid value for %s! Must be ALL, FRAME, HEADER or NONE.' % name ) def _validate_vrules(self, name, val): try: assert val in (ALL, FRAME, NONE) except AssertionError: raise Exception( 'Invalid value for %s! Must be ALL, FRAME, or NONE.' % name ) def _validate_field_name(self, name, val): try: assert (val in self._field_names) or (val is None) except AssertionError: raise Exception('Invalid field name: %s!' % val) def _validate_all_field_names(self, name, val): try: for x in val: self._validate_field_name(name, x) except AssertionError: raise Exception('fields must be a sequence of field names!') def _validate_single_char(self, name, val): return try: assert _str_block_width(val) == 1 except AssertionError: raise Exception( 'Invalid value for %s! Must be a string of length 1.' 
% name ) def _validate_attributes(self, name, val): try: assert isinstance(val, dict) except AssertionError: raise Exception('attributes must be a dictionary of name/value pairs!') ############################## # ATTRIBUTE MANAGEMENT # ############################## def _get_field_names(self): return self._field_names """The names of the fields Arguments: fields - list or tuple of field names""" def _set_field_names(self, val): val = [self._unicode(x) for x in val] self._validate_option('field_names', val) if self._field_names: old_names = self._field_names[:] self._field_names = val if self._align and old_names: for old_name, new_name in zip(old_names, val): self._align[new_name] = self._align[old_name] for old_name in old_names: if old_name not in self._align: self._align.pop(old_name) else: for field in self._field_names: self._align[field] = 'c' if self._valign and old_names: for old_name, new_name in zip(old_names, val): self._valign[new_name] = self._valign[old_name] for old_name in old_names: if old_name not in self._valign: self._valign.pop(old_name) else: for field in self._field_names: self._valign[field] = 't' field_names = property(_get_field_names, _set_field_names) def _get_align(self): return self._align def _set_align(self, val): self._validate_align(val) for field in self._field_names: self._align[field] = val align = property(_get_align, _set_align) def _get_valign(self): return self._valign def _set_valign(self, val): self._validate_valign(val) for field in self._field_names: self._valign[field] = val valign = property(_get_valign, _set_valign) def _get_max_width(self): return self._max_width def _set_max_width(self, val): self._validate_option('max_width', val) for field in self._field_names: self._max_width[field] = val max_width = property(_get_max_width, _set_max_width) def _get_fields(self): """List or tuple of field names to include in displays Arguments: fields - list or tuple of field names to include in displays""" return self._fields def _set_fields(self, val): self._validate_option('fields', val) self._fields = val fields = property(_get_fields, _set_fields) def _get_start(self): """Start index of the range of rows to print Arguments: start - index of first data row to include in output""" return self._start def _set_start(self, val): self._validate_option('start', val) self._start = val start = property(_get_start, _set_start) def _get_end(self): """End index of the range of rows to print Arguments: end - index of last data row to include in output PLUS ONE (list slice style)""" return self._end def _set_end(self, val): self._validate_option('end', val) self._end = val end = property(_get_end, _set_end) def _get_sortby(self): """Name of field by which to sort rows Arguments: sortby - field name to sort by""" return self._sortby def _set_sortby(self, val): self._validate_option('sortby', val) self._sortby = val sortby = property(_get_sortby, _set_sortby) def _get_reversesort(self): """Controls direction of sorting (ascending vs descending) Arguments: reveresort - set to True to sort by descending order, or False to sort by ascending order""" return self._reversesort def _set_reversesort(self, val): self._validate_option('reversesort', val) self._reversesort = val reversesort = property(_get_reversesort, _set_reversesort) def _get_sort_key(self): """Sorting key function, applied to data points before sorting Arguments: sort_key - a function which takes one argument and returns something to be sorted""" return self._sort_key def _set_sort_key(self, val): 
self._validate_option('sort_key', val) self._sort_key = val sort_key = property(_get_sort_key, _set_sort_key) def _get_header(self): """Controls printing of table header with field names Arguments: header - print a header showing field names (True or False)""" return self._header def _set_header(self, val): self._validate_option('header', val) self._header = val header = property(_get_header, _set_header) def _get_header_style(self): """Controls stylisation applied to field names in header Arguments: header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)""" return self._header_style def _set_header_style(self, val): self._validate_header_style(val) self._header_style = val header_style = property(_get_header_style, _set_header_style) def _get_border(self): """Controls printing of border around table Arguments: border - print a border around the table (True or False)""" return self._border def _set_border(self, val): self._validate_option('border', val) self._border = val border = property(_get_border, _set_border) def _get_hrules(self): """Controls printing of horizontal rules after rows Arguments: hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE""" return self._hrules def _set_hrules(self, val): self._validate_option('hrules', val) self._hrules = val hrules = property(_get_hrules, _set_hrules) def _get_vrules(self): """Controls printing of vertical rules between columns Arguments: vrules - vertical rules style. Allowed values: FRAME, ALL, NONE""" return self._vrules def _set_vrules(self, val): self._validate_option('vrules', val) self._vrules = val vrules = property(_get_vrules, _set_vrules) def _get_int_format(self): """Controls formatting of integer data Arguments: int_format - integer format string""" return self._int_format def _set_int_format(self, val): # self._validate_option("int_format", val) for field in self._field_names: self._int_format[field] = val int_format = property(_get_int_format, _set_int_format) def _get_float_format(self): """Controls formatting of floating point data Arguments: float_format - floating point format string""" return self._float_format def _set_float_format(self, val): # self._validate_option("float_format", val) for field in self._field_names: self._float_format[field] = val float_format = property(_get_float_format, _set_float_format) def _get_padding_width(self): """The number of empty spaces between a column's edge and its content Arguments: padding_width - number of spaces, must be a positive integer""" return self._padding_width def _set_padding_width(self, val): self._validate_option('padding_width', val) self._padding_width = val padding_width = property(_get_padding_width, _set_padding_width) def _get_left_padding_width(self): """The number of empty spaces between a column's left edge and its content Arguments: left_padding - number of spaces, must be a positive integer""" return self._left_padding_width def _set_left_padding_width(self, val): self._validate_option('left_padding_width', val) self._left_padding_width = val left_padding_width = property(_get_left_padding_width, _set_left_padding_width) def _get_right_padding_width(self): """The number of empty spaces between a column's right edge and its content Arguments: right_padding - number of spaces, must be a positive integer""" return self._right_padding_width def _set_right_padding_width(self, val): self._validate_option('right_padding_width', val) self._right_padding_width = val right_padding_width = 
property(_get_right_padding_width, _set_right_padding_width) def _get_vertical_char(self): """The charcter used when printing table borders to draw vertical lines Arguments: vertical_char - single character string used to draw vertical lines""" return self._vertical_char def _set_vertical_char(self, val): val = self._unicode(val) self._validate_option('vertical_char', val) self._vertical_char = val vertical_char = property(_get_vertical_char, _set_vertical_char) def _get_horizontal_char(self): """The charcter used when printing table borders to draw horizontal lines Arguments: horizontal_char - single character string used to draw horizontal lines""" return self._horizontal_char def _set_horizontal_char(self, val): val = self._unicode(val) self._validate_option('horizontal_char', val) self._horizontal_char = val horizontal_char = property(_get_horizontal_char, _set_horizontal_char) def _get_junction_char(self): """The charcter used when printing table borders to draw line junctions Arguments: junction_char - single character string used to draw line junctions""" return self._junction_char def _set_junction_char(self, val): val = self._unicode(val) self._validate_option('vertical_char', val) self._junction_char = val junction_char = property(_get_junction_char, _set_junction_char) def _get_format(self): """Controls whether or not HTML tables are formatted to match styling options Arguments: format - True or False""" return self._format def _set_format(self, val): self._validate_option('format', val) self._format = val format = property(_get_format, _set_format) def _get_print_empty(self): """Controls whether or not empty tables produce a header and frame or just an empty string Arguments: print_empty - True or False""" return self._print_empty def _set_print_empty(self, val): self._validate_option('print_empty', val) self._print_empty = val print_empty = property(_get_print_empty, _set_print_empty) def _get_attributes(self): """A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML Arguments: attributes - dictionary of attributes""" return self._attributes def _set_attributes(self, val): self._validate_option('attributes', val) self._attributes = val attributes = property(_get_attributes, _set_attributes) ############################## # OPTION MIXER # ############################## def _get_options(self, kwargs): options = {} for option in self._options: if option in kwargs: self._validate_option(option, kwargs[option]) options[option] = kwargs[option] else: options[option] = getattr(self, '_' + option) return options ############################## # PRESET STYLE LOGIC # ############################## def set_style(self, style): if style == DEFAULT: self._set_default_style() elif style == MSWORD_FRIENDLY: self._set_msword_style() elif style == PLAIN_COLUMNS: self._set_columns_style() elif style == RANDOM: self._set_random_style() else: raise Exception('Invalid pre-set style!') def _set_default_style(self): self.header = True self.border = True self._hrules = FRAME self._vrules = ALL self.padding_width = 1 self.left_padding_width = 1 self.right_padding_width = 1 self.vertical_char = '|' self.horizontal_char = '-' self.junction_char = '+' def _set_msword_style(self): self.header = True self.border = True self._hrules = NONE self.padding_width = 1 self.left_padding_width = 1 self.right_padding_width = 1 self.vertical_char = '|' def _set_columns_style(self): self.header = True self.border = False self.padding_width = 1 self.left_padding_width = 0 
self.right_padding_width = 8 def _set_random_style(self): # Just for fun! self.header = random.choice((True, False)) self.border = random.choice((True, False)) self._hrules = random.choice((ALL, FRAME, HEADER, NONE)) self._vrules = random.choice((ALL, FRAME, NONE)) self.left_padding_width = random.randint(0, 5) self.right_padding_width = random.randint(0, 5) self.vertical_char = random.choice('~!@#$%^&*()_+|-={}[];\':",./;<>?') self.horizontal_char = random.choice('~!@#$%^&*()_+|-={}[];\':",./;<>?') self.junction_char = random.choice('~!@#$%^&*()_+|-={}[];\':",./;<>?') ############################## # DATA INPUT METHODS # ############################## def add_row(self, row): """Add a row to the table Arguments: row - row of data, should be a list with as many elements as the table has fields""" if self._field_names and len(row) != len(self._field_names): raise Exception( 'Row has incorrect number of values, (actual) %d!=%d (expected)' % (len(row), len(self._field_names)) ) if not self._field_names: self.field_names = [('Field %d' % (n + 1)) for n in range(0, len(row))] self._rows.append(list(row)) def del_row(self, row_index): """Delete a row to the table Arguments: row_index - The index of the row you want to delete. Indexing starts at 0.""" if row_index > len(self._rows) - 1: raise Exception( 'Cant delete row at index %d, table only has %d rows!' % (row_index, len(self._rows)) ) del self._rows[row_index] def add_column(self, fieldname, column, align='c', valign='t'): """Add a column to the table. Arguments: fieldname - name of the field to contain the new column of data column - column of data, should be a list with as many elements as the table has rows align - desired alignment for this column - "l" for left, "c" for centre and "r" for right valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom""" if len(self._rows) in (0, len(column)): self._validate_align(align) self._validate_valign(valign) self._field_names.append(fieldname) self._align[fieldname] = align self._valign[fieldname] = valign for i in range(0, len(column)): if len(self._rows) < i + 1: self._rows.append([]) self._rows[i].append(column[i]) else: raise Exception( 'Column length %d does not match number of rows %d!' 
% (len(column), len(self._rows)) ) def clear_rows(self): """Delete all rows from the table but keep the current field names""" self._rows = [] def clear(self): """Delete all rows and field names from the table, maintaining nothing but styling options""" self._rows = [] self._field_names = [] self._widths = [] ############################## # MISC PUBLIC METHODS # ############################## def copy(self): return copy.deepcopy(self) ############################## # MISC PRIVATE METHODS # ############################## def _format_value(self, field, value): if isinstance(value, int) and field in self._int_format: value = self._unicode(('%%%sd' % self._int_format[field]) % value) elif isinstance(value, float) and field in self._float_format: value = self._unicode(('%%%sf' % self._float_format[field]) % value) return self._unicode(value) def _compute_widths(self, rows, options): if options['header']: widths = [_get_size(field)[0] for field in self._field_names] else: widths = len(self.field_names) * [0] for row in rows: for index, value in enumerate(row): fieldname = self.field_names[index] if fieldname in self.max_width: widths[index] = max( widths[index], min(_get_size(value)[0], self.max_width[fieldname]), ) else: widths[index] = max(widths[index], _get_size(value)[0]) self._widths = widths def _get_padding_widths(self, options): if options['left_padding_width'] is not None: lpad = options['left_padding_width'] else: lpad = options['padding_width'] if options['right_padding_width'] is not None: rpad = options['right_padding_width'] else: rpad = options['padding_width'] return lpad, rpad def _get_rows(self, options): """Return only those data rows that should be printed, based on slicing and sorting. Arguments: options - dictionary of option settings.""" # Make a copy of only those rows in the slice range rows = copy.deepcopy(self._rows[options['start'] : options['end']]) # Sort if necessary if options['sortby']: sortindex = self._field_names.index(options['sortby']) # Decorate rows = [[row[sortindex]] + row for row in rows] # Sort rows.sort(reverse=options['reversesort'], key=options['sort_key']) # Undecorate rows = [row[1:] for row in rows] return rows def _format_row(self, row, options): return [ self._format_value(field, value) for (field, value) in zip(self._field_names, row) ] def _format_rows(self, rows, options): return [self._format_row(row, options) for row in rows] ############################## # PLAIN TEXT STRING METHODS # ############################## def get_string(self, **kwargs): """Return string representation of table in current state. Arguments: start - index of first data row to include in output end - index of last data row to include in output PLUS ONE (list slice style) fields - names of fields (columns) to include header - print a header showing field names (True or False) border - print a border around the table (True or False) hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE int_format - controls formatting of integer data float_format - controls formatting of floating point data padding_width - number of spaces on either side of column data (only used if left and right paddings are None) left_padding_width - number of spaces on left hand side of column data right_padding_width - number of spaces on right hand side of column data vertical_char - single character string used to draw vertical lines horizontal_char - single character string used to draw horizontal lines junction_char - single character string used to draw line junctions sortby - name of field to sort rows by sort_key - sorting key function, applied to data points before sorting reversesort - True or False to sort in descending or ascending order print empty - if True, stringify just the header for an empty table, if False return an empty string """ options = self._get_options(kwargs) lines = [] # Don't think too hard about an empty table # Is this the desired behaviour? Maybe we should still print the header? if self.rowcount == 0 and (not options['print_empty'] or not options['border']): return '' # Get the rows we need to print, taking into account slicing, sorting, etc. rows = self._get_rows(options) # Turn all data in all rows into Unicode, formatted as desired formatted_rows = self._format_rows(rows, options) # Compute column widths self._compute_widths(formatted_rows, options) # Add header or top of border self._hrule = self._stringify_hrule(options) if options['header']: lines.append(self._stringify_header(options)) elif options['border'] and options['hrules'] in (ALL, FRAME): lines.append(self._hrule) # Add rows for row in formatted_rows: lines.append(self._stringify_row(row, options)) # Add bottom of border if options['border'] and options['hrules'] == FRAME: lines.append(self._hrule) return self._unicode('\n').join(lines) def _stringify_hrule(self, options): if not options['border']: return '' lpad, rpad = self._get_padding_widths(options) if options['vrules'] in (ALL, FRAME): bits = [options['junction_char']] else: bits = [options['horizontal_char']] # For tables with no data or fieldnames if not self._field_names: bits.append(options['junction_char']) return ''.join(bits) for field, width in zip(self._field_names, self._widths): if options['fields'] and field not in options['fields']: continue bits.append((width + lpad + rpad) * options['horizontal_char']) if options['vrules'] == ALL: bits.append(options['junction_char']) else: bits.append(options['horizontal_char']) if options['vrules'] == FRAME: bits.pop() bits.append(options['junction_char']) return ''.join(bits) def _stringify_header(self, options): bits = [] lpad, rpad = self._get_padding_widths(options) if options['border']: if options['hrules'] in (ALL, FRAME): bits.append(self._hrule) bits.append('\n') if options['vrules'] in (ALL, FRAME): bits.append(options['vertical_char']) else: bits.append(' ') # For tables with no data or field names if not self._field_names: if options['vrules'] in (ALL, FRAME): bits.append(options['vertical_char']) else: bits.append(' ') for field, width, in zip(self._field_names, self._widths): if options['fields'] and field not in options['fields']: continue if self._header_style == 'cap': fieldname = field.capitalize() elif self._header_style == 'title': fieldname = field.title() elif self._header_style == 'upper': fieldname = field.upper() elif self._header_style == 'lower': fieldname = field.lower() else: fieldname = field bits.append( ' ' * lpad + 
self._justify(fieldname, width, self._align[field]) + ' ' * rpad ) if options['border']: if options['vrules'] == ALL: bits.append(options['vertical_char']) else: bits.append(' ') # If vrules is FRAME, then we just appended a space at the end # of the last field, when we really want a vertical character if options['border'] and options['vrules'] == FRAME: bits.pop() bits.append(options['vertical_char']) if options['border'] and options['hrules'] != NONE: bits.append('\n') bits.append(self._hrule) return ''.join(bits) def _stringify_row(self, row, options): for index, field, value, width, in zip( list(range(0, len(row))), self._field_names, row, self._widths ): # Enforce max widths lines = value.split('\n') new_lines = [] for line in lines: if _str_block_width(line) > width: line = textwrap.fill(line, width) new_lines.append(line) lines = new_lines value = '\n'.join(lines) row[index] = value row_height = 0 for c in row: h = _get_size(c)[1] if h > row_height: row_height = h bits = [] lpad, rpad = self._get_padding_widths(options) for y in range(0, row_height): bits.append([]) if options['border']: if options['vrules'] in (ALL, FRAME): bits[y].append(self.vertical_char) else: bits[y].append(' ') for field, value, width, in zip(self._field_names, row, self._widths): valign = self._valign[field] lines = value.split('\n') dHeight = row_height - len(lines) if dHeight: if valign == 'm': lines = ( [''] * int(dHeight / 2) + lines + [''] * (dHeight - int(dHeight / 2)) ) elif valign == 'b': lines = [''] * dHeight + lines else: lines = lines + [''] * dHeight y = 0 for l in lines: if options['fields'] and field not in options['fields']: continue bits[y].append( ' ' * lpad + self._justify(l, width, self._align[field]) + ' ' * rpad ) if options['border']: if options['vrules'] == ALL: bits[y].append(self.vertical_char) else: bits[y].append(' ') y += 1 # If vrules is FRAME, then we just appended a space at the end # of the last field, when we really want a vertical character for y in range(0, row_height): if options['border'] and options['vrules'] == FRAME: bits[y].pop() bits[y].append(options['vertical_char']) if options['border'] and options['hrules'] == ALL: bits[row_height - 1].append('\n') bits[row_height - 1].append(self._hrule) for y in range(0, row_height): bits[y] = ''.join(bits[y]) return '\n'.join(bits) ############################## # HTML STRING METHODS # ############################## def get_html_string(self, **kwargs): """Return string representation of HTML formatted version of table in current state. Arguments: start - index of first data row to include in output end - index of last data row to include in output PLUS ONE (list slice style) fields - names of fields (columns) to include header - print a header showing field names (True or False) border - print a border around the table (True or False) hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE int_format - controls formatting of integer data float_format - controls formatting of floating point data padding_width - number of spaces on either side of column data (only used if left and right paddings are None) left_padding_width - number of spaces on left hand side of column data right_padding_width - number of spaces on right hand side of column data sortby - name of field to sort rows by sort_key - sorting key function, applied to data points before sorting attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag xhtml - print <br/> tags if True, <br> tags if false""" options = self._get_options(kwargs) if options['format']: string = self._get_formatted_html_string(options) else: string = self._get_simple_html_string(options) return string def _get_simple_html_string(self, options): lines = [] if options['xhtml']: linebreak = '<br/>' else: linebreak = '<br>' open_tag = [] open_tag.append('<table') if options['attributes']: for attr_name in options['attributes']: open_tag.append( ' %s="%s"' % (attr_name, options['attributes'][attr_name]) ) open_tag.append('>') lines.append(''.join(open_tag)) # Headers if options['header']: lines.append(' <tr>') for field in self._field_names: if options['fields'] and field not in options['fields']: continue lines.append( ' <th>%s</th>' % escape(field).replace('\n', linebreak) ) lines.append(' </tr>') # Data rows = self._get_rows(options) formatted_rows = self._format_rows(rows, options) for row in formatted_rows: lines.append(' <tr>') for field, datum in zip(self._field_names, row): if options['fields'] and field not in options['fields']: continue lines.append( ' <td>%s</td>' % escape(datum).replace('\n', linebreak) ) lines.append(' </tr>') lines.append('</table>') return self._unicode('\n').join(lines) def _get_formatted_html_string(self, options): lines = [] lpad, rpad = self._get_padding_widths(options) if options['xhtml']: linebreak = '<br/>' else: linebreak = '<br>' open_tag = [] open_tag.append('<table') if options['border']: if options['hrules'] == ALL and options['vrules'] == ALL: open_tag.append(' frame="box" rules="all"') elif options['hrules'] == FRAME and options['vrules'] == FRAME: open_tag.append(' frame="box"') elif options['hrules'] == FRAME and options['vrules'] == ALL: open_tag.append(' frame="box" rules="cols"') elif options['hrules'] == FRAME: open_tag.append(' frame="hsides"') elif options['hrules'] == ALL: open_tag.append(' frame="hsides" rules="rows"') elif options['vrules'] == FRAME: open_tag.append(' frame="vsides"') elif options['vrules'] == ALL: open_tag.append(' frame="vsides" rules="cols"') if options['attributes']: for attr_name in options['attributes']: open_tag.append( ' %s="%s"' % (attr_name, options['attributes'][attr_name]) ) open_tag.append('>') lines.append(''.join(open_tag)) # Headers if options['header']: lines.append(' <tr>') for field in self._field_names: if options['fields'] and field not in options['fields']: continue lines.append( ' <th style="padding-left: %dem; padding-right: %dem; text-align: center">%s</th>' % (lpad, rpad, escape(field).replace('\n', linebreak)) ) lines.append(' </tr>') # Data rows = self._get_rows(options) formatted_rows = self._format_rows(rows, options) aligns = [] valigns = [] for field in self._field_names: aligns.append( {'l': 'left', 'r': 'right', 'c': 'center'}[self._align[field]] ) valigns.append( {'t': 'top', 'm': 'middle', 'b': 'bottom'}[self._valign[field]] ) for row in formatted_rows: lines.append(' 
<tr>') for field, datum, align, valign in zip( self._field_names, row, aligns, valigns ): if options['fields'] and field not in options['fields']: continue lines.append( ' <td style="padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s">%s</td>' % ( lpad, rpad, align, valign, escape(datum).replace('\n', linebreak), ) ) lines.append(' </tr>') lines.append('</table>') return self._unicode('\n').join(lines) ############################## # UNICODE WIDTH FUNCTIONS # ############################## def _char_block_width(char): # Basic Latin, which is probably the most common case # if char in xrange(0x0021, 0x007e): # if char >= 0x0021 and char <= 0x007e: if 0x0021 <= char <= 0x007E: return 1 # Chinese, Japanese, Korean (common) if 0x4E00 <= char <= 0x9FFF: return 2 # Hangul if 0xAC00 <= char <= 0xD7AF: return 2 # Combining? if unicodedata.combining(uni_chr(char)): return 0 # Hiragana and Katakana if 0x3040 <= char <= 0x309F or 0x30A0 <= char <= 0x30FF: return 2 # Full-width Latin characters if 0xFF01 <= char <= 0xFF60: return 2 # CJK punctuation if 0x3000 <= char <= 0x303E: return 2 # Backspace and delete if char in (0x0008, 0x007F): return -1 # Other control characters elif char in (0x0000, 0x001F): return 0 # Take a guess return 1 def _str_block_width(val): return sum(map(_char_block_width, list(map(ord, _re.sub('', val))))) ############################## # TABLE FACTORIES # ############################## def from_csv(fp, field_names=None, **kwargs): dialect = csv.Sniffer().sniff(fp.read(1024)) fp.seek(0) reader = csv.reader(fp, dialect) table = PrettyTable(**kwargs) if field_names: table.field_names = field_names else: if py3k: table.field_names = [x.strip() for x in next(reader)] else: table.field_names = [x.strip() for x in next(reader)] for row in reader: table.add_row([x.strip() for x in row]) return table def from_db_cursor(cursor, **kwargs): if cursor.description: table = PrettyTable(**kwargs) table.field_names = [col[0] for col in cursor.description] for row in cursor.fetchall(): table.add_row(row) return table class TableHandler(HTMLParser): def __init__(self, **kwargs): HTMLParser.__init__(self) self.kwargs = kwargs self.tables = [] self.last_row = [] self.rows = [] self.max_row_width = 0 self.active = None self.last_content = '' self.is_last_row_header = False def handle_starttag(self, tag, attrs): self.active = tag if tag == 'th': self.is_last_row_header = True def handle_endtag(self, tag): if tag in ['th', 'td']: stripped_content = self.last_content.strip() self.last_row.append(stripped_content) if tag == 'tr': self.rows.append((self.last_row, self.is_last_row_header)) self.max_row_width = max(self.max_row_width, len(self.last_row)) self.last_row = [] self.is_last_row_header = False if tag == 'table': table = self.generate_table(self.rows) self.tables.append(table) self.rows = [] self.last_content = ' ' self.active = None def handle_data(self, data): self.last_content += data def generate_table(self, rows): """ Generates from a list of rows a PrettyTable object. 
""" table = PrettyTable(**self.kwargs) for row in self.rows: if len(row[0]) < self.max_row_width: appends = self.max_row_width - len(row[0]) for i in range(1, appends): row[0].append('-') if row[1] == True: self.make_fields_unique(row[0]) table.field_names = row[0] else: table.add_row(row[0]) return table def make_fields_unique(self, fields): """ iterates over the row and make each field unique """ for i in range(0, len(fields)): for j in range(i + 1, len(fields)): if fields[i] == fields[j]: fields[j] += "'" def from_html(html_code, **kwargs): """ Generates a list of PrettyTables from a string of HTML code. Each <table> in the HTML becomes one PrettyTable object. """ parser = TableHandler(**kwargs) parser.feed(html_code) return parser.tables def from_html_one(html_code, **kwargs): """ Generates a PrettyTables from a string of HTML code which contains only a single <table> """ tables = from_html(html_code, **kwargs) try: assert len(tables) == 1 except AssertionError: raise Exception( 'More than one <table> in provided HTML code! Use from_html instead.' ) return tables[0] ############################## # MAIN (TEST FUNCTION) # ############################## def main(): x = PrettyTable(['City name', 'Area', 'Population', 'Annual Rainfall']) x.sortby = 'Population' x.reversesort = True x.int_format['Area'] = '04d' x.float_format = '6.1f' x.align['City name'] = 'l' # Left align city names x.add_row(['Hoehenkirchen\nSiegertsbrunn', 1295, 1158259, 600.5]) x.add_row(['Adelaide', 1295, 1158259, 600.5]) x.add_row(['Brisbane', 5905, 1857594, 1146.4]) x.add_row(['Darwin', 112, 120900, 1714.7]) x.add_row(['Hobart', 1357, 205556, 619.5]) x.add_row(['Sydney', 2058, 4336374, 1214.8]) x.add_row(['Melbourne', 1566, 3806092, 646.9]) x.add_row(['Perth', 5386, 1554769, 869.4]) print(x) if __name__ == '__main__': main()
py
1a49cb0a1e678584cea00ab3fdfc70822bbc2933
import logging import time from collections import defaultdict from queue import Queue from threading import Thread from kube_hunter.conf import get_config from kube_hunter.core.types import ActiveHunter, HunterBase from kube_hunter.core.events.types import Vulnerability, EventFilterBase, MultipleEventsContainer logger = logging.getLogger(__name__) # Inherits Queue object, handles events asynchronously class EventQueue(Queue): def __init__(self, num_worker=10): super().__init__() self.passive_hunters = dict() self.active_hunters = dict() self.all_hunters = dict() self.running = True self.workers = list() # -- Regular Subscription -- # Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function) self.hooks = defaultdict(list) self.filters = defaultdict(list) # -------------------------- # -- Multiple Subscription -- # Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function) self.multi_hooks = defaultdict(list) # When subscribing to multiple events, this gets populated with required event classes # Structure: key: Hunter Class, value: set(RequiredEventClass1, RequiredEventClass2) self.hook_dependencies = defaultdict(set) # To keep track of fulfilled dependencies. we need to have a structure which saves historical instanciated # events mapped to a registered hunter. # We used a 2 dimensional dictionary in order to fulfill two demands: # * correctly count published required events # * save historical events fired, easily sorted by their type # # Structure: hook_fulfilled_deps[hunter_class] -> fulfilled_events_for_hunter[event_class] -> [EventObject, EventObject2] self.hook_fulfilled_deps = defaultdict(lambda: defaultdict(list)) # --------------------------- for _ in range(num_worker): t = Thread(target=self.worker) t.daemon = True t.start() self.workers.append(t) t = Thread(target=self.notifier) t.daemon = True t.start() """ ###################################################### + ----------------- Public Methods ----------------- + ###################################################### """ def subscribe(self, event, hook=None, predicate=None, is_register=True): """ The Subscribe Decorator - For Regular Registration Use this to register for one event only. Your hunter will execute each time this event is published @param event - Event class to subscribe to @param predicate - Optional: Function that will be called with the published event as a parameter before trigger. If it's return value is False, the Hunter will not run (default=None). @param hook - Hunter class to register for (ignore when using as a decorator) """ def wrapper(hook): self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register) return hook return wrapper def subscribe_many(self, events, hook=None, predicates=None, is_register=True): """ The Subscribe Many Decorator - For Multiple Registration, When your attack needs several prerequisites to exist in the cluster, You need to register for multiple events. Your hunter will execute once for every new combination of required events. For example: 1. event A was published 3 times 2. event B was published once. 3. event B was published again Your hunter will execute 2 times: * (on step 2) with the newest version of A * (on step 3) with the newest version of A and newest version of B @param events - List of event classes to subscribe to @param predicates - Optional: List of function that will be called with the published event as a parameter before trigger. 
If it's return value is False, the Hunter will not run (default=None). @param hook - Hunter class to register for (ignore when using as a decorator) """ def wrapper(hook): self.subscribe_events(events, hook=hook, predicates=predicates, is_register=is_register) return hook return wrapper def subscribe_once(self, event, hook=None, predicate=None, is_register=True): """ The Subscribe Once Decorator - For Single Trigger Registration, Use this when you want your hunter to execute only in your entire program run wraps subscribe_event method @param events - List of event classes to subscribe to @param predicates - Optional: List of function that will be called with the published event as a parameter before trigger. If it's return value is False, the Hunter will not run (default=None). @param hook - Hunter class to register for (ignore when using as a decorator) """ def wrapper(hook): # installing a __new__ magic method on the hunter # which will remove the hunter from the list upon creation def __new__unsubscribe_self(self, cls): handler.hooks[event].remove((hook, predicate)) return object.__new__(self) hook.__new__ = __new__unsubscribe_self self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register) return hook return wrapper def publish_event(self, event, caller=None): """ The Publish Event Method - For Publishing Events To Kube-Hunter's Queue """ # Document that the hunter published a vulnerability (if it's indeed a vulnerability) # For statistics options self._increase_vuln_count(event, caller) # sets the event's parent to be it's publisher hunter. self._set_event_chain(event, caller) # applying filters on the event, before publishing it to subscribers. # if filter returned None, not proceeding to publish event = self.apply_filters(event) if event: # If event was rewritten, make sure it's linked again self._set_event_chain(event, caller) # Regular Hunter registrations - publish logic # Here we iterate over all the registered-to events: for hooked_event in self.hooks.keys(): # We check if the event we want to publish is an inherited class of the current registered-to iterated event # Meaning - if this is a relevant event: if hooked_event in event.__class__.__mro__: # If so, we want to publish to all registerd hunters. for hook, predicate in self.hooks[hooked_event]: if predicate and not predicate(event): continue self.put(hook(event)) logger.debug(f"Event {event.__class__} got published to hunter - {hook} with {event}") # Multiple Hunter registrations - publish logic # Here we iterate over all the registered-to events: for hooked_event in self.multi_hooks.keys(): # We check if the event we want to publish is an inherited class of the current registered-to iterated event # Meaning - if this is a relevant event: if hooked_event in event.__class__.__mro__: # now we iterate over the corresponding registered hunters. for hook, predicate in self.multi_hooks[hooked_event]: if predicate and not predicate(event): continue self._update_multi_hooks(hook, event) if self._is_all_fulfilled_for_hunter(hook): events_container = MultipleEventsContainer(self._get_latest_events_from_multi_hooks(hook)) self.put(hook(events_container)) logger.debug( f"Multiple subscription requirements were met for hunter {hook}. 
events container was \ published with {self.hook_fulfilled_deps[hook].keys()}" ) """ ###################################################### + ---------------- Private Methods ----------------- + + ---------------- (Backend Logic) ----------------- + ###################################################### """ def _get_latest_events_from_multi_hooks(self, hook): """ Iterates over fulfilled deps for the hunter, and fetching the latest appended events from history """ latest_events = list() for event_class in self.hook_fulfilled_deps[hook].keys(): latest_events.append(self.hook_fulfilled_deps[hook][event_class][-1]) return latest_events def _update_multi_hooks(self, hook, event): """ Updates published events in the multi hooks fulfilled store. """ self.hook_fulfilled_deps[hook][event.__class__].append(event) def _is_all_fulfilled_for_hunter(self, hook): """ Returns true for multi hook fulfilled, else oterwise """ # Check if the first dimension already contains all necessary event classes return len(self.hook_fulfilled_deps[hook].keys()) == len(self.hook_dependencies[hook]) def _set_event_chain(self, event, caller): """ Sets' events attribute chain. In here we link the event with it's publisher (Hunter), so in the next hunter that catches this event, we could access the previous one's attributes. @param event: the event object to be chained @param caller: the Hunter object that published this event. """ if caller: event.previous = caller.event event.hunter = caller.__class__ def _register_hunters(self, hook=None): """ This method is called when a Hunter registers itself to the handler. this is done in order to track and correctly configure the current run of the program. passive_hunters, active_hunters, all_hunters """ config = get_config() if ActiveHunter in hook.__mro__: if not config.active: return False else: self.active_hunters[hook] = hook.__doc__ elif HunterBase in hook.__mro__: self.passive_hunters[hook] = hook.__doc__ if HunterBase in hook.__mro__: self.all_hunters[hook] = hook.__doc__ return True def _register_filter(self, event, hook=None, predicate=None): if hook not in self.filters[event]: self.filters[event].append((hook, predicate)) logging.debug("{} filter subscribed to {}".format(hook, event)) def _register_hook(self, event, hook=None, predicate=None): if hook not in self.hooks[event]: self.hooks[event].append((hook, predicate)) logging.debug("{} subscribed to {}".format(hook, event)) def subscribe_event(self, event, hook=None, predicate=None, is_register=True): if not is_register: return if not self._register_hunters(hook): return # registering filters if EventFilterBase in hook.__mro__: self._register_filter(event, hook, predicate) # registering hunters else: self._register_hook(event, hook, predicate) def subscribe_events(self, events, hook=None, predicates=None, is_register=True): if not is_register: return False if not self._register_hunters(hook): return False if predicates is None: predicates = [None] * len(events) # registering filters. if EventFilterBase in hook.__mro__: for event, predicate in zip(events, predicates): self._register_filter(event, hook, predicate) # registering hunters. 
else: for event, predicate in zip(events, predicates): self.multi_hooks[event].append((hook, predicate)) self.hook_dependencies[hook] = frozenset(events) def apply_filters(self, event): # if filters are subscribed, apply them on the event for hooked_event in self.filters.keys(): if hooked_event in event.__class__.__mro__: for filter_hook, predicate in self.filters[hooked_event]: if predicate and not predicate(event): continue logger.debug(f"Event {event.__class__} filtered with {filter_hook}") event = filter_hook(event).execute() # if filter decided to remove event, returning None if not event: return None return event def _increase_vuln_count(self, event, caller): config = get_config() if config.statistics and caller: if Vulnerability in event.__class__.__mro__: caller.__class__.publishedVulnerabilities += 1 # executes callbacks on dedicated thread as a daemon def worker(self): while self.running: try: hook = self.get() logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}") hook.execute() except Exception as ex: logger.debug(ex, exc_info=True) finally: self.task_done() logger.debug("closing thread...") def notifier(self): time.sleep(2) # should consider locking on unfinished_tasks while self.unfinished_tasks > 0: logger.debug(f"{self.unfinished_tasks} tasks left") time.sleep(3) if self.unfinished_tasks == 1: logger.debug("final hook is hanging") # stops execution of all daemons def free(self): self.running = False with self.mutex: self.queue.clear() handler = EventQueue(800)
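# A minimal subscription/publish sketch for the EventQueue above, kept in comment
# form. DemoPortEvent and DemoHunter are hypothetical names, and the import of
# Event from kube_hunter.core.events.types is an assumption about the surrounding
# package layout; only the handler API defined in this module (subscribe,
# publish_event) is taken as given.
#
#   from kube_hunter.core.events.types import Event
#   from kube_hunter.core.types import HunterBase
#
#   class DemoPortEvent(Event):
#       def __init__(self, port):
#           self.port = port
#
#   @handler.subscribe(DemoPortEvent, predicate=lambda event: event.port == 8080)
#   class DemoHunter(HunterBase):
#       """Hypothetical hunter, queued for every DemoPortEvent whose predicate holds"""
#       def __init__(self, event):
#           self.event = event
#
#       def execute(self):
#           logger.info(f"DemoHunter triggered for port {self.event.port}")
#
#   # Publishing queues DemoHunter(event); a worker thread then calls execute():
#   # handler.publish_event(DemoPortEvent(port=8080))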
py
1a49cb1fccdebcdd7ff7c17b1855b975a6e41a98
from sympy.core import (S, pi, oo, symbols, Function, Rational, Integer, Tuple, Derivative)
from sympy.integrals import Integral
from sympy.concrete import Sum
from sympy.functions import exp, sin, cos, conjugate, Max, Min

from sympy import mathematica_code as mcode

x, y, z = symbols('x,y,z')
f = Function('f')


def test_Integer():
    assert mcode(Integer(67)) == "67"
    assert mcode(Integer(-1)) == "-1"


def test_Rational():
    assert mcode(Rational(3, 7)) == "3/7"
    assert mcode(Rational(18, 9)) == "2"
    assert mcode(Rational(3, -7)) == "-3/7"
    assert mcode(Rational(-3, -7)) == "3/7"
    assert mcode(x + Rational(3, 7)) == "x + 3/7"
    assert mcode(Rational(3, 7)*x) == "(3/7)*x"


def test_Function():
    assert mcode(f(x, y, z)) == "f[x, y, z]"
    assert mcode(sin(x) ** cos(x)) == "Sin[x]^Cos[x]"
    assert mcode(conjugate(x)) == "Conjugate[x]"
    assert mcode(Max(x,y,z)*Min(y,z)) == "Max[x, y, z]*Min[y, z]"


def test_Pow():
    assert mcode(x**3) == "x^3"
    assert mcode(x**(y**3)) == "x^(y^3)"
    assert mcode(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \
        "(3.5*f[x])^(-x + y^x)/(x^2 + y)"
    assert mcode(x**-1.0) == 'x^(-1.0)'
    assert mcode(x**Rational(2, 3)) == 'x^(2/3)'


def test_Mul():
    A, B, C, D = symbols('A B C D', commutative=False)
    assert mcode(x*y*z) == "x*y*z"
    assert mcode(x*y*A) == "x*y*A"
    assert mcode(x*y*A*B) == "x*y*A**B"
    assert mcode(x*y*A*B*C) == "x*y*A**B**C"
    assert mcode(x*A*B*(C + D)*A*y) == "x*y*A**B**(C + D)**A"


def test_constants():
    assert mcode(pi) == "Pi"
    assert mcode(oo) == "Infinity"
    assert mcode(S.NegativeInfinity) == "-Infinity"
    assert mcode(S.EulerGamma) == "EulerGamma"
    assert mcode(S.Catalan) == "Catalan"
    assert mcode(S.Exp1) == "E"


def test_containers():
    assert mcode([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \
        "{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}"
    assert mcode((1, 2, (3, 4))) == "{1, 2, {3, 4}}"
    assert mcode([1]) == "{1}"
    assert mcode((1,)) == "{1}"
    assert mcode(Tuple(*[1, 2, 3])) == "{1, 2, 3}"


def test_Integral():
    assert mcode(Integral(sin(sin(x)), x)) == "Hold[Integrate[Sin[Sin[x]], x]]"
    assert mcode(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \
        "Hold[Integrate[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, " \
        "{y, -Infinity, Infinity}]]"


def test_Derivative():
    assert mcode(Derivative(sin(x), x)) == "Hold[D[Sin[x], x]]"
    assert mcode(Derivative(x, x)) == "Hold[D[x, x]]"
    assert mcode(Derivative(sin(x)*y**4, x, 2)) == "Hold[D[y^4*Sin[x], {x, 2}]]"
    assert mcode(Derivative(sin(x)*y**4, x, y, x)) == "Hold[D[y^4*Sin[x], x, y, x]]"
    assert mcode(Derivative(sin(x)*y**4, x, y, 3, x)) == "Hold[D[y^4*Sin[x], x, {y, 3}, x]]"


def test_Sum():
    assert mcode(Sum(sin(x), (x, 0, 10))) == "Hold[Sum[Sin[x], {x, 0, 10}]]"
    assert mcode(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \
        "Hold[Sum[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, " \
        "{y, -Infinity, Infinity}]]"
py
1a49cd25b92b08715f245f67a8f7163453cdd307
from cConstants import cEPAConstants, cPlotConstants from cEnum import eEPA import cPlot2D import cPlotEPA import sys sys.path.append("../") import bayesact import wx class cPlotFrame(cPlotEPA.cPlotFrame): def __init__(self, iParent, **kwargs): cPlot2D.cPlotFrame.__init__(self, iParent, **kwargs) def initPanel(self, *args, **kwargs): self.m_PlotPanel = cPlotPanel(self, **kwargs) class cPlotPanel(cPlot2D.cPlotPanel): def __init__(self, iParent, iXAxisItem=eEPA.evaluation, iYAxisItem=eEPA.potency, iPlotType=eEPA.fundamental, **kwargs): cPlot2D.cPlotPanel.__init__(self, iParent, **kwargs) self.m_XAxisItem = iXAxisItem self.m_YAxisItem = iYAxisItem self.m_PlotType = iPlotType self.m_SimInteractiveTabsPanel = iParent # The other plots, changes the x and y boundaries of this plot will be done the same to other plots # Good for comparing multiple plots self.m_TwinPlots = [] self.m_LearnerSamples = [] self.m_SimulatorSamples = [] def getSentimentEPAIndex(self, iEPA, iSentiment): return iEPA + (cEPAConstants.m_Dimensions * iSentiment) # Axis items are the enumerations of the elements in eEPA, so they're basically numbers def setAxis(iXAxisItem, iYAxisItem): self.m_XAxisItem = iXAxisItem self.m_YAxisItem = iYAxisItem def plotEPA(self, iLearnerSamples, iSimulatorSamples, iLearnerPreviousAction, iSimulatorPreviousAction): self.clearAxes() # Size is the size of the point in terms of viewing size lsize=50 # Alpha is the opacity of the point lalpha=0.5 self.m_LearnerSamples = iLearnerSamples self.m_SimulatorSamples = iSimulatorSamples if (0 < len(iLearnerSamples)): # Learner's sentiments on self and other, green and pink respectively learnerSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier) learnerSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier) learnerSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier) learnerSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier) self.plotScatter( iLearnerSamples[learnerSamplesXIndexSelf], iLearnerSamples[learnerSamplesYIndexSelf], iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="cyan", alpha=lalpha, animated=False) self.plotScatter( iLearnerSamples[learnerSamplesXIndexOther], iLearnerSamples[learnerSamplesYIndexOther], iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="blue", alpha=lalpha, animated=False) # This also checks that when an action has an EPA rating of (0, 0, 0), it will not plot it if (0 < len(iLearnerPreviousAction)): if ((0, 0, 0) == (iLearnerPreviousAction[0], iLearnerPreviousAction[1], iLearnerPreviousAction[2])): pass else: self.plotScatter( iLearnerPreviousAction[self.m_XAxisItem], iLearnerPreviousAction[self.m_YAxisItem], marker="*", s=200, c="turquoise", alpha=1) if (0 < len(iSimulatorSamples)): # Simulator's sentiments on self and other, goldenrod and blue respectively simulatorSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier) simulatorSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier) simulatorSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier) simulatorSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier) self.plotScatter( iSimulatorSamples[simulatorSamplesXIndexSelf], iSimulatorSamples[simulatorSamplesYIndexSelf], iAutoScaling=False, 
iRedraw=False, iUpdate=False, marker="o", s=lsize, c="magenta", alpha=lalpha, animated=False) self.plotScatter( iSimulatorSamples[simulatorSamplesXIndexOther], iSimulatorSamples[simulatorSamplesYIndexOther], iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="red", alpha=lalpha, animated=False) if (0 < len(iSimulatorPreviousAction)): if ((0, 0, 0) == (iSimulatorPreviousAction[0],iSimulatorPreviousAction[1], iSimulatorPreviousAction[2])): pass else: self.plotScatter( iSimulatorPreviousAction[self.m_XAxisItem], iSimulatorPreviousAction[self.m_YAxisItem], marker="*", s=200, c="magenta", alpha=1) self.m_Axes.set_title(self.m_Title, fontsize=12) self.m_Axes.set_xlabel(cEPAConstants.m_EPALabels[self.m_XAxisItem]) self.m_Axes.set_ylabel(cEPAConstants.m_EPALabels[self.m_YAxisItem]) self.redrawAxes() def onMousePress(self, iEvent): # Returns (index, minDist), where minDist is the minimum euclidean distance calculated def getMin(data, x, y): index = 0 minDist = ((data[0][0] - x) ** 2) + ((data[1][0] - y) ** 2) points = len(data[0]) for i in range(points-1): dist = ((data[0][i+1] - x) ** 2) + ((data[1][i+1] - y) ** 2) if (dist < minDist): minDist = dist index = i+1 return (index, minDist) def getSampleEPA(data, dataIndex, evaluationIndex, potencyIndex, activityIndex): return [data[evaluationIndex][dataIndex], data[potencyIndex][dataIndex], data[activityIndex][dataIndex]] # Do default function, then find closest point, if anything is plotted # Please note that this does not include the previous action super(cPlotPanel, self).onMousePress(iEvent) # 1 represents left click, check for closest point when left clicking if(1 != iEvent.button): return if (iEvent.inaxes != self.m_Axes): return if (0 >= len(self.m_LearnerSamples)): return xPoint = iEvent.xdata yPoint = iEvent.ydata learnerSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier) learnerSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier) learnerSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier) learnerSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier) simulatorSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier) simulatorSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier) simulatorSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier) simulatorSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier) # To find the closest point to where the mouse clicked visibleLearnerSelfData = [self.m_LearnerSamples[learnerSamplesXIndexSelf], self.m_LearnerSamples[learnerSamplesYIndexSelf]] visibleLearnerOtherData = [self.m_LearnerSamples[learnerSamplesXIndexOther], self.m_LearnerSamples[learnerSamplesYIndexOther]] visibleSimulatorSelfData = [self.m_SimulatorSamples[simulatorSamplesXIndexSelf], self.m_SimulatorSamples[simulatorSamplesYIndexSelf]] visibleSimulatorOtherData = [self.m_SimulatorSamples[simulatorSamplesXIndexOther], self.m_SimulatorSamples[simulatorSamplesYIndexOther]] learnerSelf, learnerOther, simulatorSelf, simulatorOther = range(4) currentMinIndex, currentMinDist = getMin(visibleLearnerSelfData, xPoint, yPoint) currentMinData = learnerSelf allOtherData = [visibleLearnerOtherData, visibleSimulatorSelfData, visibleSimulatorOtherData] for i in range(len(allOtherData)): tempMinIndex, 
tempMinDist = getMin(allOtherData[i], xPoint, yPoint) if (tempMinDist < currentMinDist): currentMinIndex, currentMinDist = tempMinIndex, tempMinDist currentMinData = i+1 if (currentMinData == learnerSelf): epa = getSampleEPA(self.m_LearnerSamples, currentMinIndex, eEPA.evaluationSelf, eEPA.potencySelf, eEPA.activitySelf) elif (currentMinData == learnerOther): epa = getSampleEPA(self.m_LearnerSamples, currentMinIndex, eEPA.evaluationOther, eEPA.potencyOther, eEPA.activityOther) elif (currentMinData == simulatorSelf): epa = getSampleEPA(self.m_SimulatorSamples, currentMinIndex, eEPA.evaluationSelf, eEPA.potencySelf, eEPA.activitySelf) else: epa = getSampleEPA(self.m_SimulatorSamples, currentMinIndex, eEPA.evaluationOther, eEPA.potencyOther, eEPA.activityOther) gender = self.m_SimInteractiveTabsPanel.m_OptionsAgentPanel.m_ClientGenderChoice.GetStringSelection() if ("male" == gender): estimatedIdentity = bayesact.findNearestEPAVector(epa, self.m_SimInteractiveTabsPanel.m_fidentitiesMale) else: estimatedIdentity = bayesact.findNearestEPAVector(epa, self.m_SimInteractiveTabsPanel.m_fidentitiesFemale) # Those threes mean 3 decimal places message = "You clicked on point: {}".format((round(xPoint, 3), round(yPoint, 3))) +\ "\nHere is the closest point:" +\ "\nEvaluation: {}\nPotency: {}\nActivity: {}".format(round(epa[eEPA.evaluation], 3), round(epa[eEPA.potency], 3), round(epa[eEPA.activity], 3)) +\ "\nClosest Identity: {}".format(estimatedIdentity) +\ "\nType: {}".format(cEPAConstants.m_PlotDetails[currentMinData]) wx.MessageBox(message, "Closest Point") def changeXAxisLabel(self, iLabel): self.m_XAxisItem = iLabel for plotEPA2D in self.m_TwinPlots: plotEPA2D.m_XAxisItem = iLabel def changeYAxisLabel(self, iLabel): self.m_YAxisItem = iLabel for plotEPA2D in self.m_TwinPlots: plotEPA2D.m_YAxisItem = iLabel def shiftXAxis(self, iShiftAmount): super(cPlotPanel, self).shiftXAxis(iShiftAmount) self.updateAxesData() for plotEPA2D in self.m_TwinPlots: plotEPA2D.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax) plotEPA2D.redrawAxes() def shiftYAxis(self, iShiftAmount): super(cPlotPanel, self).shiftYAxis(iShiftAmount) self.updateAxesData() for plotEPA2D in self.m_TwinPlots: plotEPA2D.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax) plotEPA2D.redrawAxes() def zoomAxes(self, iZoomAmount): super(cPlotPanel, self).zoomAxes(iZoomAmount) self.updateAxesData() for plotEPA2D in self.m_TwinPlots: plotEPA2D.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax) plotEPA2D.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax) plotEPA2D.redrawAxes() def resetAxes(self): super(cPlotPanel, self).resetAxes() self.updateAxesData() for plotEPA2D in self.m_TwinPlots: plotEPA2D.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax) plotEPA2D.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax) plotEPA2D.redrawAxes()
py
1a49ce07d2cd7b217059106d92380fce1c862070
from os import error
import threading
from threading import Thread
from multiprocessing import Process
import json
import sys
from put import split
from cat import cat
from remove import remove
from ls import listallfiles
from mapreduce import mapreduce

#change path to this file accordingly
dfs_setup_config = "/users/vinaynaidu/DFS/setup.json"
setupfiledir = "/users/vinaynaidu/DFS/"
f = open(dfs_setup_config)
config = json.load(f)

block_size = config['block_size']
path_to_datanodes = config['path_to_datanodes']
path_to_namenodes = config['path_to_namenodes']
replication_factor = config['replication_factor']
num_datanodes = config['num_datanodes']
datanode_size = config['datanode_size']
sync_period = config['sync_period']
datanode_log_path = config['datanode_log_path']
namenode_log_path = config['namenode_log_path']
namenode_checkpoints = config['namenode_checkpoints']
fs_path = config['fs_path']
dfs_setup_config = config['dfs_setup_config']
setupfiledir = config['dfs_setup_config'][:-10]

sys.path.append(path_to_datanodes)
sys.path.append(path_to_namenodes)
from namenode import namenodereceiveheartbeat1
from secondarynamenode import secnamenodereceiveheartbeat

dsthreads = {}
for i in range(1, num_datanodes + 1):
    sys.path.append(path_to_datanodes + 'datanode{}/'.format(i))
    exec("from datanode{} import datanode{}HB".format(i, i))
    exec("dsthreads['datanodehbthread{}'] = threading.Thread(target = datanode{}HB, name = 'DatanodeHBThread{}')".format(i, i, i))

namenodeHBthread = threading.Thread(target=namenodereceiveheartbeat1, name='namenodeHBthread')
secnamenodeHBthread = threading.Thread(target=secnamenodereceiveheartbeat, name='secnamenodeHBthread')
namenodeHBthread.start()
secnamenodeHBthread.start()

for i in range(1, num_datanodes + 1):
    dsthreads['datanodehbthread{}'.format(i)].start()

functionality = '''put, syntax - put <absolute path of the file>
cat, syntax - cat <filename>
ls, syntax - ls
rm, syntax - rm <filename>
runmapreducejob -i <absolute path of input file> -o <absolute path of output> -c <dfs setup file> -m <mapper absolute path> -r <reducer absolute path>'''

print("The default HDFS or the previous session is loaded...")
print("Provide configuration file and run createhdfs.py if you wish to create a new DFS.")

while True:
    print()
    print("Enter the DFS command...")
    print(functionality)
    print()
    command = input().split()

    if command[0] == "put":
        if len(command) == 2:
            try:
                message = split(command[1])
                print(message)
                print()
            except error as e:
                print(e)
        else:
            print("Invalid syntax for put command")

    if command[0] == "cat":
        if len(command) == 2:
            try:
                cat(command[1])
            except error as e:
                print(e)
        else:
            print("Invalid syntax for cat command")

    if command[0] == "rm":
        if len(command) == 2:
            try:
                remove(command[1])
            except error as e:
                print(e)
        else:
            print("Invalid syntax for rm command")

    if command[0] == "runmapreducejob":
        if len(command) == 11:
            inputfilepath = command[2]
            outputfilepath = command[4]
            setupfilepath = command[6]
            mapperpath = command[8]
            reducerpath = command[10]
            mapreduce(inputfilepath, outputfilepath, setupfilepath, mapperpath, reducerpath)
        else:
            print("Invalid syntax for running Map Reduce job")

    if command[0] == "ls":
        print()
        print("Files present in the DFS -")
        listallfiles()
py
1a49cedcf448454b2ad42a6972696f7406eba632
#python main.py --env-name "HalfCheetah-v2"
# --algo ppo
# --use-gae
# --log-interval 1
# --num-steps 2048
# --num-processes 1
# --lr 3e-4
# --entropy-coef 0
# --value-loss-coef 0.5
# --ppo-epoch 10
# --num-mini-batch 32
# --gamma 0.99
# --gae-lambda 0.95
# --num-env-steps 10000000
# --use-linear-lr-decay
# --use-proper-time-limits
# --gail

# import argparse
#
# parser = argparse.ArgumentParser()
# parser.add_argument('--sparse', action='store_true', default=True, help='GAT with sparse version or not.')
# parser.add_argument('--seed', type=int, default=72, help='Random seed.')
# parser.add_argument('--epochs', type=int, default=10000, help='Number of epochs to train.')
#
# args = parser.parse_args()
#
# print(args.sparse)
# print(args.seed)
# print(args.epochs)

import torch
import numpy as np
import random

if __name__ == '__main__':
    # torch.manual_seed(1)
    # #torch.cuda.manual_seed_all(args.seed)
    # np.random.seed(1)
    # for i in range(2):
    #     # for j in range(2):
    #     #     a = np.random.rand(3)
    #     #     print(a)
    #     a = np.random.rand(3)
    #     print(a)
    #     print("--------1----------")
    #     a = np.random.rand(3)
    #     print(a)
    #     print("--------2----------")
    #     a = np.random.rand(3)
    #     print(a)
    #     #print("------------------")
    # print("------*------------")
    # print("[4.17022005e-01 7.20324493e-01 1.14374817e-04]\
    # [0.30233257 0.14675589 0.09233859]\
    # [0.18626021 0.34556073 0.39676747]\
    # [0.53881673 0.41919451 0.6852195 ]")#
    # print("[4.17022005e-01 7.20324493e-01 1.14374817e-04 3.02332573e-01\
    # 1.46755891e-01]")
    # for _ in range(4):
    #     np.random.seed(1)
    #     b = np.random.choice(a)
    #     print(b)
    # w = torch.empty(3, 5)
    # print(w)
    # print(torch.nn.init.orthogonal_(w))
    # a = 3 / 1
    # b = 3 // 1
    # print("a:", a, "b:", b)
    # a = [1, 2, 3, 4, 5, 6, 7, 8]
    # print(a[:8])
    # torch.manual_seed(1)
    # a = torch.randint(1, 100, (1, 9, 2))
    # #b = torch.rand(1, ).long()
    # # print(b)
    # # b1 = torch.tensor([1, 1, 0]).long()
    # # b2 = torch.tensor([1]).long()
    # # c1 = a[b1]
    # # c2 = a[b2]
    # print("a:", a)
    # # print("b1:", b1)random
    # # print("b2:", b2)
    # # print("c1:", c1)
    # # print("c2:", c2)
    # # b = a[0, 0::4]
    # b = a // 3
    # print("b:", b)
    # a = torch.tensor([1, 2, 3])
    # d = {"A": a}
    # e = d["A"]
    # f = d["A"].sum()
    # g = d["A"].sum().item()
    # print("e:", e)
    # print("f:", f)
    # print("g:", g)
    # for i, j in d.items():
    #     print("i:", i, "j:", j)
    # b = 2 > 1
    # print("b:", b)

    a = np.random.rand(3, 4, 6)
    b = np.random.rand(2, 5)
    np.savez('/home/johnny/Document/Python/baselines/data/test.npz', a=a, b=b)
    print("a:", a, "\n", "b:", b)
    data = np.load('/home/johnny/Document/Python/baselines/data/test.npz')
    print("data:", data)
    print("data['a']:", data['a'], "\n", "data['b']:", data['b'])
    print("len(data['a']):", len(data['a']))
    c = data['a'][:len(data['a'])]
    print("c:", c)
py
1a49cf0573ed943c6db81fa5fa001609a4e4757e
#!/usr/bin/python # coding: utf-8 # __author__ jax777 import os import cgi import json import urllib import hashlib import datetime import tornado.ioloop import tornado.web import traceback from tornado.escape import json_decode from multiprocessing import Process from sqllitedb import * from DnsServer import dns_server from config import datadb from config import mydomain from config import tz class BaseHandler(tornado.web.RequestHandler): def get_current_user(self): print self.get_secure_cookie("prefix") return self.get_secure_cookie("prefix") class login(tornado.web.RequestHandler): # 页面跳转 /#/info def get(self): pass def post(self): data = json_decode(self.request.body) mail = data['mail'] passwd = data['passwd'] try: prefix = db_login(mail,passwd) if prefix == 1: self.write("0") else: self.set_secure_cookie("prefix", prefix) self.write("1") except Exception,e: traceback.print_exc() self.write("0") class getDomain(tornado.web.RequestHandler): # @tornado.web.authenticated def get(self): prefix = self.get_secure_cookie("prefix") #print prefix if prefix: domains = get_domains(prefix) # hash domain stime ret = [] #print domains for tmp in domains: httpcount = get_http_count(tmp['hash']) dnscount = get_dns_count(tmp['hash']) _ = {'hash': tmp['hash'],'domain':tmp['domain'],'http':httpcount,'dns':dnscount,'time':tmp['stime']} ret.append(_) self.write(json.dumps(ret)) else: self.write("0") class showHttp(tornado.web.RequestHandler): # @tornado.web.authenticated def get(self,hash): http = get_http_info(hash) info = '' for tmp in http: url = urllib.unquote(tmp['url']) info = info + url + 'user-agent:'+ tmp['ua'] + ' srcip:' + tmp['srcip'] + ' time:' + tmp['time'] + '\n' info = cgi.escape(info) # write base64 txt self.write(info) class showDns(tornado.web.RequestHandler): # @tornado.web.authenticated def get(self,hash): dns = get_dns_info(hash) info = '' for tmp in dns: info = info + 'domain:' + tmp['domain'] + ' srcip:' + tmp['srcip'] + ' time:' + tmp['time'] + '\n' info = cgi.escape(info) self.write(info) class deletDomain(tornado.web.RequestHandler): # @tornado.web.authenticated def get(self,hash): delet_domain(hash) self.write('1') class deletALL(tornado.web.RequestHandler): # @tornado.web.authenticated def get(self): self.write("1") class index(tornado.web.RequestHandler): def get(self): self.render('index.html') def ui(): settings = { "static_path": os.path.join(os.path.dirname(__file__), "static"), "login_url": "/#/", "cookie_secret":"tw51dk+wR3iErYBKKKpFwHF20ppWjUBHut3b1cCoWmw=" } return tornado.web.Application([ (r"/login", login), (r"/getDomain", getDomain), (r"/showHttp/([0-9a-z]{32})", showHttp), (r"/showDns/([0-9a-z]{32})", showDns), (r"/deletDomain/([0-9a-z]{32})", deletDomain), (r"/deletAll", deletALL), (r"/", index), ], **settings) class http_handler(tornado.web.RequestHandler): def get(self): url = self.request.protocol + "://" + self.request.host + self.request.uri url = urllib.quote(url) srcip = self.request.remote_ip ua = '' hash = hashlib.new('md5', self.request.host).hexdigest() stime = datetime.datetime.now(tz).strftime( '%Y-%m-%d %H:%M:%S' ) try: if mydomain in self.request.host: prefix = self.request.host.split('.')[-3] judge_domain(hash,self.request.host,prefix,stime) except Exception,e: traceback.print_exc() #id = """""" try: ua = self.request.headers["User-Agent"] except: pass update_http(hash,url,ua,srcip,stime) self.write("hello moto") def http_log(): return tornado.web.Application([ (r".*", http_handler), ]) if __name__ == "__main__": if os.path.exists(datadb): pass 
else: create_db() UI = ui() UI.listen(8888) print 'UI done' p_dns = Process(target=dns_server, args=()) p_dns.start() print 'dns done' http_server = http_log() http_server.listen(80) print 'http done' tornado.ioloop.IOLoop.current().start()
py
1a49cf4b92fa227780bf9fb87f989ea80921226c
import collections import random import threading import time import weakref import sqlalchemy as tsa from sqlalchemy import event from sqlalchemy import pool from sqlalchemy import select from sqlalchemy import testing from sqlalchemy.engine import default from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_context_ok from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.engines import testing_engine from sqlalchemy.testing.mock import ANY from sqlalchemy.testing.mock import call from sqlalchemy.testing.mock import Mock from sqlalchemy.testing.mock import patch from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing.util import lazy_gc join_timeout = 10 def MockDBAPI(): # noqa def cursor(): return Mock() def connect(*arg, **kw): def close(): conn.closed = True # mock seems like it might have an issue logging # call_count correctly under threading, not sure. # adding a side_effect for close seems to help. conn = Mock( cursor=Mock(side_effect=cursor), close=Mock(side_effect=close), closed=False, ) return conn def shutdown(value): if value: db.connect = Mock(side_effect=Exception("connect failed")) else: db.connect = Mock(side_effect=connect) db.is_shutdown = value db = Mock( connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False ) return db class PoolTestBase(fixtures.TestBase): def setup(self): pool.clear_managers() self._teardown_conns = [] def teardown(self): for ref in self._teardown_conns: conn = ref() if conn: conn.close() @classmethod def teardown_class(cls): pool.clear_managers() def _with_teardown(self, connection): self._teardown_conns.append(weakref.ref(connection)) return connection def _queuepool_fixture(self, **kw): dbapi, pool = self._queuepool_dbapi_fixture(**kw) return pool def _queuepool_dbapi_fixture(self, **kw): dbapi = MockDBAPI() return ( dbapi, pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw), ) class PoolTest(PoolTestBase): @testing.fails_on( "+pyodbc", "pyodbc cursor doesn't implement tuple __eq__" ) @testing.fails_on("+pg8000", "returns [1], not (1,)") def test_cursor_iterable(self): conn = testing.db.raw_connection() cursor = conn.cursor() cursor.execute(str(select([1], bind=testing.db))) expected = [(1,)] for row in cursor: eq_(row, expected.pop(0)) def test_no_connect_on_recreate(self): def creator(): raise Exception("no creates allowed") for cls in ( pool.SingletonThreadPool, pool.StaticPool, pool.QueuePool, pool.NullPool, pool.AssertionPool, ): p = cls(creator=creator) p.dispose() p2 = p.recreate() assert p2.__class__ is cls mock_dbapi = MockDBAPI() p = cls(creator=mock_dbapi.connect) conn = p.connect() conn.close() mock_dbapi.connect.side_effect = Exception("error!") p.dispose() p.recreate() def test_info(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c = p.connect() self.assert_(not c.info) self.assert_(c.info is c._connection_record.info) c.info["foo"] = "bar" c.close() del c c = p.connect() self.assert_("foo" in c.info) c.invalidate() c = p.connect() self.assert_("foo" not in c.info) c.info["foo2"] = "bar2" c.detach() self.assert_("foo2" in c.info) c2 = p.connect() is_not(c.connection, c2.connection) assert not c2.info assert "foo2" in c.info def test_rec_info(self): p = 
self._queuepool_fixture(pool_size=1, max_overflow=0) c = p.connect() self.assert_(not c.record_info) self.assert_(c.record_info is c._connection_record.record_info) c.record_info["foo"] = "bar" c.close() del c c = p.connect() self.assert_("foo" in c.record_info) c.invalidate() c = p.connect() self.assert_("foo" in c.record_info) c.record_info["foo2"] = "bar2" c.detach() is_(c.record_info, None) is_(c._connection_record, None) c2 = p.connect() assert c2.record_info assert "foo2" in c2.record_info def test_rec_unconnected(self): # test production of a _ConnectionRecord with an # initially unconnected state. dbapi = MockDBAPI() p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db")) r1 = pool._ConnectionRecord(p1, connect=False) assert not r1.connection c1 = r1.get_connection() is_(c1, r1.connection) def test_rec_close_reopen(self): # test that _ConnectionRecord.close() allows # the record to be reusable dbapi = MockDBAPI() p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db")) r1 = pool._ConnectionRecord(p1) c1 = r1.connection c2 = r1.get_connection() is_(c1, c2) r1.close() assert not r1.connection eq_(c1.mock_calls, [call.close()]) c2 = r1.get_connection() is_not(c1, c2) is_(c2, r1.connection) eq_(c2.mock_calls, []) @testing.combinations( ( pool.QueuePool, dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True), ), (pool.QueuePool, {}), (pool.NullPool, {}), (pool.SingletonThreadPool, {}), (pool.StaticPool, {}), (pool.AssertionPool, {}), ) def test_recreate_state(self, pool_cls, pool_args): creator = object() pool_args["pre_ping"] = True pool_args["reset_on_return"] = "commit" pool_args["recycle"] = 35 pool_args["logging_name"] = "somepool" pool_args["dialect"] = default.DefaultDialect() pool_args["echo"] = "debug" p1 = pool_cls(creator=creator, **pool_args) cls_keys = dir(pool_cls) d1 = dict(p1.__dict__) p2 = p1.recreate() d2 = dict(p2.__dict__) for k in cls_keys: d1.pop(k, None) d2.pop(k, None) for k in ( "_threadconns", "_invoke_creator", "_pool", "_overflow_lock", "_fairy", "_conn", "logger", ): if k in d2: d2[k] = mock.ANY eq_(d1, d2) eq_(p1.echo, p2.echo) is_(p1._dialect, p2._dialect) if "use_lifo" in pool_args: eq_(p1._pool.use_lifo, p2._pool.use_lifo) class PoolDialectTest(PoolTestBase): def _dialect(self): canary = [] class PoolDialect(object): def do_rollback(self, dbapi_connection): canary.append("R") dbapi_connection.rollback() def do_commit(self, dbapi_connection): canary.append("C") dbapi_connection.commit() def do_close(self, dbapi_connection): canary.append("CL") dbapi_connection.close() return PoolDialect(), canary def _do_test(self, pool_cls, assertion): mock_dbapi = MockDBAPI() dialect, canary = self._dialect() p = pool_cls(creator=mock_dbapi.connect) p._dialect = dialect conn = p.connect() conn.close() p.dispose() p.recreate() conn = p.connect() conn.close() eq_(canary, assertion) def test_queue_pool(self): self._do_test(pool.QueuePool, ["R", "CL", "R"]) def test_assertion_pool(self): self._do_test(pool.AssertionPool, ["R", "CL", "R"]) def test_singleton_pool(self): self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"]) def test_null_pool(self): self._do_test(pool.NullPool, ["R", "CL", "R", "CL"]) def test_static_pool(self): self._do_test(pool.StaticPool, ["R", "R"]) class PoolEventsTest(PoolTestBase): def _first_connect_event_fixture(self): p = self._queuepool_fixture() canary = [] def first_connect(*arg, **kw): canary.append("first_connect") event.listen(p, "first_connect", first_connect) return p, canary def _connect_event_fixture(self): p = 
self._queuepool_fixture() canary = [] def connect(*arg, **kw): canary.append("connect") event.listen(p, "connect", connect) return p, canary def _checkout_event_fixture(self): p = self._queuepool_fixture() canary = [] def checkout(*arg, **kw): canary.append("checkout") event.listen(p, "checkout", checkout) return p, canary def _checkin_event_fixture(self): p = self._queuepool_fixture() canary = [] def checkin(*arg, **kw): canary.append("checkin") event.listen(p, "checkin", checkin) return p, canary def _reset_event_fixture(self): p = self._queuepool_fixture() canary = [] def reset(*arg, **kw): canary.append("reset") event.listen(p, "reset", reset) return p, canary def _invalidate_event_fixture(self): p = self._queuepool_fixture() canary = Mock() event.listen(p, "invalidate", canary) return p, canary def _soft_invalidate_event_fixture(self): p = self._queuepool_fixture() canary = Mock() event.listen(p, "soft_invalidate", canary) return p, canary def _close_event_fixture(self): p = self._queuepool_fixture() canary = Mock() event.listen(p, "close", canary) return p, canary def _detach_event_fixture(self): p = self._queuepool_fixture() canary = Mock() event.listen(p, "detach", canary) return p, canary def _close_detached_event_fixture(self): p = self._queuepool_fixture() canary = Mock() event.listen(p, "close_detached", canary) return p, canary def test_close(self): p, canary = self._close_event_fixture() c1 = p.connect() connection = c1.connection rec = c1._connection_record c1.close() eq_(canary.mock_calls, []) p.dispose() eq_(canary.mock_calls, [call(connection, rec)]) def test_detach(self): p, canary = self._detach_event_fixture() c1 = p.connect() connection = c1.connection rec = c1._connection_record c1.detach() eq_(canary.mock_calls, [call(connection, rec)]) def test_detach_close(self): p, canary = self._close_detached_event_fixture() c1 = p.connect() connection = c1.connection c1.detach() c1.close() eq_(canary.mock_calls, [call(connection)]) def test_first_connect_event(self): p, canary = self._first_connect_event_fixture() p.connect() eq_(canary, ["first_connect"]) def test_first_connect_event_fires_once(self): p, canary = self._first_connect_event_fixture() p.connect() p.connect() eq_(canary, ["first_connect"]) def test_first_connect_on_previously_recreated(self): p, canary = self._first_connect_event_fixture() p2 = p.recreate() p.connect() p2.connect() eq_(canary, ["first_connect", "first_connect"]) def test_first_connect_on_subsequently_recreated(self): p, canary = self._first_connect_event_fixture() p.connect() p2 = p.recreate() p2.connect() eq_(canary, ["first_connect", "first_connect"]) def test_connect_event(self): p, canary = self._connect_event_fixture() p.connect() eq_(canary, ["connect"]) def test_connect_insert_event(self): p = self._queuepool_fixture() canary = [] def connect_one(*arg, **kw): canary.append("connect_one") def connect_two(*arg, **kw): canary.append("connect_two") def connect_three(*arg, **kw): canary.append("connect_three") event.listen(p, "connect", connect_one) event.listen(p, "connect", connect_two, insert=True) event.listen(p, "connect", connect_three) p.connect() eq_(canary, ["connect_two", "connect_one", "connect_three"]) def test_connect_event_fires_subsequent(self): p, canary = self._connect_event_fixture() c1 = p.connect() # noqa c2 = p.connect() # noqa eq_(canary, ["connect", "connect"]) def test_connect_on_previously_recreated(self): p, canary = self._connect_event_fixture() p2 = p.recreate() p.connect() p2.connect() eq_(canary, ["connect", 
"connect"]) def test_connect_on_subsequently_recreated(self): p, canary = self._connect_event_fixture() p.connect() p2 = p.recreate() p2.connect() eq_(canary, ["connect", "connect"]) def test_checkout_event(self): p, canary = self._checkout_event_fixture() p.connect() eq_(canary, ["checkout"]) def test_checkout_event_fires_subsequent(self): p, canary = self._checkout_event_fixture() p.connect() p.connect() eq_(canary, ["checkout", "checkout"]) def test_checkout_event_on_subsequently_recreated(self): p, canary = self._checkout_event_fixture() p.connect() p2 = p.recreate() p2.connect() eq_(canary, ["checkout", "checkout"]) def test_checkin_event(self): p, canary = self._checkin_event_fixture() c1 = p.connect() eq_(canary, []) c1.close() eq_(canary, ["checkin"]) def test_reset_event(self): p, canary = self._reset_event_fixture() c1 = p.connect() eq_(canary, []) c1.close() eq_(canary, ["reset"]) def test_soft_invalidate_event_no_exception(self): p, canary = self._soft_invalidate_event_fixture() c1 = p.connect() c1.close() assert not canary.called c1 = p.connect() dbapi_con = c1.connection c1.invalidate(soft=True) assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is None def test_soft_invalidate_event_exception(self): p, canary = self._soft_invalidate_event_fixture() c1 = p.connect() c1.close() assert not canary.called c1 = p.connect() dbapi_con = c1.connection exc = Exception("hi") c1.invalidate(exc, soft=True) assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is exc def test_invalidate_event_no_exception(self): p, canary = self._invalidate_event_fixture() c1 = p.connect() c1.close() assert not canary.called c1 = p.connect() dbapi_con = c1.connection c1.invalidate() assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is None def test_invalidate_event_exception(self): p, canary = self._invalidate_event_fixture() c1 = p.connect() c1.close() assert not canary.called c1 = p.connect() dbapi_con = c1.connection exc = Exception("hi") c1.invalidate(exc) assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is exc @testing.requires.predictable_gc def test_checkin_event_gc(self): p, canary = self._checkin_event_fixture() c1 = p.connect() eq_(canary, []) del c1 lazy_gc() eq_(canary, ["checkin"]) def test_checkin_event_on_subsequently_recreated(self): p, canary = self._checkin_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, []) c1.close() eq_(canary, ["checkin"]) c2.close() eq_(canary, ["checkin", "checkin"]) def test_listen_targets_scope(self): canary = [] def listen_one(*args): canary.append("listen_one") def listen_two(*args): canary.append("listen_two") def listen_three(*args): canary.append("listen_three") def listen_four(*args): canary.append("listen_four") engine = testing_engine(testing.db.url) event.listen(pool.Pool, "connect", listen_one) event.listen(engine.pool, "connect", listen_two) event.listen(engine, "connect", listen_three) event.listen(engine.__class__, "connect", listen_four) engine.execute(select(1)).close() eq_( canary, ["listen_one", "listen_four", "listen_two", "listen_three"] ) def test_listen_targets_per_subclass(self): """test that listen() called on a subclass remains specific to that subclass.""" canary = [] def listen_one(*args): canary.append("listen_one") def listen_two(*args): canary.append("listen_two") def listen_three(*args): canary.append("listen_three") event.listen(pool.Pool, "connect", 
listen_one) event.listen(pool.QueuePool, "connect", listen_two) event.listen(pool.SingletonThreadPool, "connect", listen_three) p1 = pool.QueuePool(creator=MockDBAPI().connect) p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect) assert listen_one in p1.dispatch.connect assert listen_two in p1.dispatch.connect assert listen_three not in p1.dispatch.connect assert listen_one in p2.dispatch.connect assert listen_two not in p2.dispatch.connect assert listen_three in p2.dispatch.connect p1.connect() eq_(canary, ["listen_one", "listen_two"]) p2.connect() eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"]) def test_connect_event_fails_invalidates(self): fail = False def listen_one(conn, rec): if fail: raise Exception("it failed") def listen_two(conn, rec): rec.info["important_flag"] = True p1 = pool.QueuePool( creator=MockDBAPI().connect, pool_size=1, max_overflow=0 ) event.listen(p1, "connect", listen_one) event.listen(p1, "connect", listen_two) conn = p1.connect() eq_(conn.info["important_flag"], True) conn.invalidate() conn.close() fail = True assert_raises(Exception, p1.connect) fail = False conn = p1.connect() eq_(conn.info["important_flag"], True) conn.close() def teardown(self): # TODO: need to get remove() functionality # going pool.Pool.dispatch._clear() class PoolFirstConnectSyncTest(PoolTestBase): # test [ticket:2964] @testing.requires.timing_intensive def test_sync(self): pool = self._queuepool_fixture(pool_size=3, max_overflow=0) evt = Mock() @event.listens_for(pool, "first_connect") def slow_first_connect(dbapi_con, rec): time.sleep(1) evt.first_connect() @event.listens_for(pool, "connect") def on_connect(dbapi_con, rec): evt.connect() def checkout(): for j in range(2): c1 = pool.connect() time.sleep(0.02) c1.close() time.sleep(0.02) threads = [] # what we're trying to do here is have concurrent use of # all three pooled connections at once, and the thing we want # to test is that first_connect() finishes completely before # any of the connections get returned. so first_connect() # sleeps for one second, then pings the mock. the threads should # not have made it to the "checkout() event for that one second. for i in range(5): th = threading.Thread(target=checkout) th.start() threads.append(th) for th in threads: th.join(join_timeout) # there is a very unlikely condition observed in CI on windows # where even though we have five threads above all calling upon the # pool, we didn't get concurrent use of all three connections, two # connections were enough. so here we purposely just check out # all three at once just to get a consistent test result. 
make_sure_all_three_are_connected = [pool.connect() for i in range(3)] for conn in make_sure_all_three_are_connected: conn.close() eq_( evt.mock_calls, [ call.first_connect(), call.connect(), call.connect(), call.connect(), ], ) class QueuePoolTest(PoolTestBase): def test_queuepool_del(self): self._do_testqueuepool(useclose=False) def test_queuepool_close(self): self._do_testqueuepool(useclose=True) def _do_testqueuepool(self, useclose=False): p = self._queuepool_fixture(pool_size=3, max_overflow=-1) reaper = testing.engines.ConnectionKiller() reaper.add_pool(p) def status(pool): return ( pool.size(), pool.checkedin(), pool.overflow(), pool.checkedout(), ) c1 = p.connect() self.assert_(status(p) == (3, 0, -2, 1)) c2 = p.connect() self.assert_(status(p) == (3, 0, -1, 2)) c3 = p.connect() self.assert_(status(p) == (3, 0, 0, 3)) c4 = p.connect() self.assert_(status(p) == (3, 0, 1, 4)) c5 = p.connect() self.assert_(status(p) == (3, 0, 2, 5)) c6 = p.connect() self.assert_(status(p) == (3, 0, 3, 6)) if useclose: c4.close() c3.close() c2.close() else: c4 = c3 = c2 = None lazy_gc() self.assert_(status(p) == (3, 3, 3, 3)) if useclose: c1.close() c5.close() c6.close() else: c1 = c5 = c6 = None lazy_gc() self.assert_(status(p) == (3, 3, 0, 0)) c1 = p.connect() c2 = p.connect() self.assert_(status(p) == (3, 1, 0, 2), status(p)) if useclose: c2.close() else: c2 = None lazy_gc() self.assert_(status(p) == (3, 2, 0, 1)) c1.close() reaper.assert_all_closed() def test_timeout_accessor(self): expected_timeout = 123 p = self._queuepool_fixture(timeout=expected_timeout) eq_(p.timeout(), expected_timeout) @testing.requires.timing_intensive def test_timeout(self): p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2) c1 = p.connect() # noqa c2 = p.connect() # noqa c3 = p.connect() # noqa now = time.time() assert_raises(tsa.exc.TimeoutError, p.connect) assert int(time.time() - now) == 2 @testing.requires.timing_intensive def test_timeout_subsecond_precision(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5) c1 = p.connect() # noqa with expect_raises(tsa.exc.TimeoutError): now = time.time() c2 = p.connect() # noqa # Python timing is not very accurate, the time diff should be very # close to 0.5s but we give 200ms of slack. assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected" @testing.requires.threading_with_mock @testing.requires.timing_intensive def test_timeout_race(self): # test a race condition where the initial connecting threads all race # to queue.Empty, then block on the mutex. each thread consumes a # connection as they go in. when the limit is reached, the remaining # threads go in, and get TimeoutError; even though they never got to # wait for the timeout on queue.get(). 
the fix involves checking the # timeout again within the mutex, and if so, unlocking and throwing # them back to the start of do_get() dbapi = MockDBAPI() p = pool.QueuePool( creator=lambda: dbapi.connect(delay=0.05), pool_size=2, max_overflow=1, timeout=3, ) timeouts = [] def checkout(): for x in range(1): now = time.time() try: c1 = p.connect() except tsa.exc.TimeoutError: timeouts.append(time.time() - now) continue time.sleep(4) c1.close() threads = [] for i in range(10): th = threading.Thread(target=checkout) th.start() threads.append(th) for th in threads: th.join(join_timeout) assert len(timeouts) > 0 for t in timeouts: assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts # normally, the timeout should under 4 seconds, # but on a loaded down buildbot it can go up. assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts def _test_overflow(self, thread_count, max_overflow): reaper = testing.engines.ConnectionKiller() dbapi = MockDBAPI() mutex = threading.Lock() def creator(): time.sleep(0.05) with mutex: return dbapi.connect() p = pool.QueuePool( creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow ) reaper.add_pool(p) peaks = [] def whammy(): for i in range(10): try: con = p.connect() time.sleep(0.005) peaks.append(p.overflow()) con.close() del con except tsa.exc.TimeoutError: pass threads = [] for i in range(thread_count): th = threading.Thread(target=whammy) th.start() threads.append(th) for th in threads: th.join(join_timeout) self.assert_(max(peaks) <= max_overflow) reaper.assert_all_closed() def test_overflow_reset_on_failed_connect(self): dbapi = Mock() def failing_dbapi(): raise Exception("connection failed") creator = dbapi.connect def create(): return creator() p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3) c1 = self._with_teardown(p.connect()) # noqa c2 = self._with_teardown(p.connect()) # noqa c3 = self._with_teardown(p.connect()) # noqa eq_(p._overflow, 1) creator = failing_dbapi assert_raises(Exception, p.connect) eq_(p._overflow, 1) @testing.requires.threading_with_mock @testing.requires.timing_intensive def test_hanging_connect_within_overflow(self): """test that a single connect() call which is hanging does not block other connections from proceeding.""" dbapi = Mock() mutex = threading.Lock() def hanging_dbapi(): time.sleep(2) with mutex: return dbapi.connect() def fast_dbapi(): with mutex: return dbapi.connect() creator = threading.local() def create(): return creator.mock_connector() def run_test(name, pool, should_hang): if should_hang: creator.mock_connector = hanging_dbapi else: creator.mock_connector = fast_dbapi conn = pool.connect() conn.operation(name) time.sleep(1) conn.close() p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3) threads = [ threading.Thread(target=run_test, args=("success_one", p, False)), threading.Thread(target=run_test, args=("success_two", p, False)), threading.Thread(target=run_test, args=("overflow_one", p, True)), threading.Thread(target=run_test, args=("overflow_two", p, False)), threading.Thread( target=run_test, args=("overflow_three", p, False) ), ] for t in threads: t.start() time.sleep(0.2) for t in threads: t.join(timeout=join_timeout) eq_( dbapi.connect().operation.mock_calls, [ call("success_one"), call("success_two"), call("overflow_two"), call("overflow_three"), call("overflow_one"), ], ) @testing.requires.threading_with_mock @testing.requires.timing_intensive def test_waiters_handled(self): """test that threads waiting for connections are handled when the 
pool is replaced. """ mutex = threading.Lock() dbapi = MockDBAPI() def creator(): with mutex: return dbapi.connect() success = [] for timeout in (None, 30): for max_overflow in (0, -1, 3): p = pool.QueuePool( creator=creator, pool_size=2, timeout=timeout, max_overflow=max_overflow, ) def waiter(p, timeout, max_overflow): success_key = (timeout, max_overflow) conn = p.connect() success.append(success_key) time.sleep(0.1) conn.close() c1 = p.connect() # noqa c2 = p.connect() threads = [] for i in range(2): t = threading.Thread( target=waiter, args=(p, timeout, max_overflow) ) t.daemon = True t.start() threads.append(t) # this sleep makes sure that the # two waiter threads hit upon wait() # inside the queue, before we invalidate the other # two conns time.sleep(0.2) p._invalidate(c2) for t in threads: t.join(join_timeout) eq_(len(success), 12, "successes: %s" % success) def test_connrec_invalidated_within_checkout_no_race(self): """Test that a concurrent ConnectionRecord.invalidate() which occurs after the ConnectionFairy has called _ConnectionRecord.checkout() but before the ConnectionFairy tests "fairy.connection is None" will not result in an InvalidRequestError. This use case assumes that a listener on the checkout() event will be raising DisconnectionError so that a reconnect attempt may occur. """ dbapi = MockDBAPI() def creator(): return dbapi.connect() p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0) conn = p.connect() conn.close() _existing_checkout = pool._ConnectionRecord.checkout @classmethod def _decorate_existing_checkout(cls, *arg, **kw): fairy = _existing_checkout(*arg, **kw) connrec = fairy._connection_record connrec.invalidate() return fairy with patch( "sqlalchemy.pool._ConnectionRecord.checkout", _decorate_existing_checkout, ): conn = p.connect() is_(conn._connection_record.connection, None) conn.close() @testing.requires.threading_with_mock @testing.requires.timing_intensive def test_notify_waiters(self): dbapi = MockDBAPI() canary = [] def creator(): canary.append(1) return dbapi.connect() p1 = pool.QueuePool( creator=creator, pool_size=1, timeout=None, max_overflow=0 ) def waiter(p): conn = p.connect() canary.append(2) time.sleep(0.5) conn.close() c1 = p1.connect() threads = [] for i in range(5): t = threading.Thread(target=waiter, args=(p1,)) t.start() threads.append(t) time.sleep(0.5) eq_(canary, [1]) # this also calls invalidate() # on c1 p1._invalidate(c1) for t in threads: t.join(join_timeout) eq_(canary, [1, 1, 2, 2, 2, 2, 2]) def test_dispose_closes_pooled(self): dbapi = MockDBAPI() p = pool.QueuePool( creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0 ) c1 = p.connect() c2 = p.connect() c1_con = c1.connection c2_con = c2.connection c1.close() eq_(c1_con.close.call_count, 0) eq_(c2_con.close.call_count, 0) p.dispose() eq_(c1_con.close.call_count, 1) eq_(c2_con.close.call_count, 0) # currently, if a ConnectionFairy is closed # after the pool has been disposed, there's no # flag that states it should be invalidated # immediately - it just gets returned to the # pool normally... c2.close() eq_(c1_con.close.call_count, 1) eq_(c2_con.close.call_count, 0) # ...and that's the one we'll get back next. 
c3 = p.connect() assert c3.connection is c2_con @testing.requires.threading_with_mock @testing.requires.timing_intensive def test_no_overflow(self): self._test_overflow(40, 0) @testing.requires.threading_with_mock @testing.requires.timing_intensive def test_max_overflow(self): self._test_overflow(40, 5) def test_overflow_no_gc(self): p = self._queuepool_fixture(pool_size=2, max_overflow=2) # disable weakref collection of the # underlying connections strong_refs = set() def _conn(): c = p.connect() strong_refs.add(c.connection) return c for j in range(5): # open 4 conns at a time. each time this # will yield two pooled connections + two # overflow connections. conns = [_conn() for i in range(4)] for c in conns: c.close() # doing that for a total of 5 times yields # ten overflow connections closed plus the # two pooled connections unclosed. eq_( set([c.close.call_count for c in strong_refs]), set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]), ) def test_recycle(self): with patch("sqlalchemy.pool.base.time.time") as mock: mock.return_value = 10000 p = self._queuepool_fixture( pool_size=1, max_overflow=0, recycle=30 ) c1 = p.connect() c_ref = weakref.ref(c1.connection) c1.close() mock.return_value = 10001 c2 = p.connect() is_(c2.connection, c_ref()) c2.close() mock.return_value = 10035 c3 = p.connect() is_not(c3.connection, c_ref()) @testing.requires.timing_intensive def test_recycle_on_invalidate(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c_ref = weakref.ref(c1.connection) c1.close() c2 = p.connect() is_(c2.connection, c_ref()) c2_rec = c2._connection_record p._invalidate(c2) assert c2_rec.connection is None c2.close() time.sleep(0.5) c3 = p.connect() is_not(c3.connection, c_ref()) @testing.requires.timing_intensive def test_recycle_on_soft_invalidate(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c_ref = weakref.ref(c1.connection) c1.close() c2 = p.connect() is_(c2.connection, c_ref()) c2_rec = c2._connection_record # ensure pool invalidate time will be later than starttime # for ConnectionRecord objects above time.sleep(0.1) c2.invalidate(soft=True) is_(c2_rec.connection, c2.connection) c2.close() c3 = p.connect() is_not(c3.connection, c_ref()) is_(c3._connection_record, c2_rec) is_(c2_rec.connection, c3.connection) def _no_wr_finalize(self): finalize_fairy = pool._finalize_fairy def assert_no_wr_callback( connection, connection_record, pool, ref, echo, fairy=None ): if fairy is None: raise AssertionError( "finalize fairy was called as a weakref callback" ) return finalize_fairy( connection, connection_record, pool, ref, echo, fairy ) return patch.object(pool, "_finalize_fairy", assert_no_wr_callback) def _assert_cleanup_on_pooled_reconnect(self, dbapi, p): # p is QueuePool with size=1, max_overflow=2, # and one connection in the pool that will need to # reconnect when next used (either due to recycle or invalidate) with self._no_wr_finalize(): eq_(p.checkedout(), 0) eq_(p._overflow, 0) dbapi.shutdown(True) assert_raises_context_ok(Exception, p.connect) eq_(p._overflow, 0) eq_(p.checkedout(), 0) # and not 1 dbapi.shutdown(False) c1 = self._with_teardown(p.connect()) # noqa assert p._pool.empty() # poolsize is one, so we're empty OK c2 = self._with_teardown(p.connect()) # noqa eq_(p._overflow, 1) # and not 2 # this hangs if p._overflow is 2 c3 = self._with_teardown(p.connect()) c3.close() def test_error_on_pooled_reconnect_cleanup_invalidate(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2) c1 = 
p.connect() c1.invalidate() c1.close() self._assert_cleanup_on_pooled_reconnect(dbapi, p) @testing.requires.timing_intensive def test_error_on_pooled_reconnect_cleanup_recycle(self): dbapi, p = self._queuepool_dbapi_fixture( pool_size=1, max_overflow=2, recycle=1 ) c1 = p.connect() c1.close() time.sleep(1.5) self._assert_cleanup_on_pooled_reconnect(dbapi, p) @testing.requires.timing_intensive def test_connect_handler_not_called_for_recycled(self): """test [ticket:3497]""" dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2) canary = Mock() c1 = p.connect() c2 = p.connect() c1.close() c2.close() dbapi.shutdown(True) # ensure pool invalidate time will be later than starttime # for ConnectionRecord objects above time.sleep(0.1) bad = p.connect() p._invalidate(bad) bad.close() assert p._invalidate_time event.listen(p, "connect", canary.connect) event.listen(p, "checkout", canary.checkout) assert_raises(Exception, p.connect) p._pool.queue = collections.deque( [c for c in p._pool.queue if c.connection is not None] ) dbapi.shutdown(False) c = p.connect() c.close() eq_( canary.mock_calls, [call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)], ) @testing.requires.timing_intensive def test_connect_checkout_handler_always_gets_info(self): """test [ticket:3497]""" dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2) c1 = p.connect() c2 = p.connect() c1.close() c2.close() dbapi.shutdown(True) # ensure pool invalidate time will be later than starttime # for ConnectionRecord objects above time.sleep(0.1) bad = p.connect() p._invalidate(bad) bad.close() assert p._invalidate_time @event.listens_for(p, "connect") def connect(conn, conn_rec): conn_rec.info["x"] = True @event.listens_for(p, "checkout") def checkout(conn, conn_rec, conn_f): assert "x" in conn_rec.info assert_raises(Exception, p.connect) p._pool.queue = collections.deque( [c for c in p._pool.queue if c.connection is not None] ) dbapi.shutdown(False) c = p.connect() c.close() def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2) c1 = p.connect() c1.close() @event.listens_for(p, "checkout") def handle_checkout_event(dbapi_con, con_record, con_proxy): if dbapi.is_shutdown: raise tsa.exc.DisconnectionError() self._assert_cleanup_on_pooled_reconnect(dbapi, p) @testing.requires.predictable_gc def test_userspace_disconnectionerror_weakref_finalizer(self): dbapi, pool = self._queuepool_dbapi_fixture( pool_size=1, max_overflow=2 ) @event.listens_for(pool, "checkout") def handle_checkout_event(dbapi_con, con_record, con_proxy): if getattr(dbapi_con, "boom") == "yes": raise tsa.exc.DisconnectionError() conn = pool.connect() old_dbapi_conn = conn.connection conn.close() eq_(old_dbapi_conn.mock_calls, [call.rollback()]) old_dbapi_conn.boom = "yes" conn = pool.connect() dbapi_conn = conn.connection del conn gc_collect() # new connection was reset on return appropriately eq_(dbapi_conn.mock_calls, [call.rollback()]) # old connection was just closed - did not get an # erroneous reset on return eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()]) @testing.requires.timing_intensive def test_recycle_pool_no_race(self): def slow_close(): slow_closing_connection._slow_close() time.sleep(0.5) slow_closing_connection = Mock() slow_closing_connection.connect.return_value.close = slow_close class Error(Exception): pass dialect = Mock() dialect.is_disconnect = lambda *arg, **kw: True dialect.dbapi.Error = Error pools = [] class 
TrackQueuePool(pool.QueuePool): def __init__(self, *arg, **kw): pools.append(self) super(TrackQueuePool, self).__init__(*arg, **kw) def creator(): return slow_closing_connection.connect() p1 = TrackQueuePool(creator=creator, pool_size=20) from sqlalchemy import create_engine eng = create_engine(testing.db.url, pool=p1, _initialize=False) eng.dialect = dialect # 15 total connections conns = [eng.connect() for i in range(15)] # return 8 back to the pool for conn in conns[3:10]: conn.close() def attempt(conn): time.sleep(random.random()) try: conn._handle_dbapi_exception( Error(), "statement", {}, Mock(), Mock() ) except tsa.exc.DBAPIError: pass # run an error + invalidate operation on the remaining 7 open # connections threads = [] for conn in conns: t = threading.Thread(target=attempt, args=(conn,)) t.start() threads.append(t) for t in threads: t.join() # return all 15 connections to the pool for conn in conns: conn.close() # re-open 15 total connections conns = [eng.connect() for i in range(15)] # 15 connections have been fully closed due to invalidate assert slow_closing_connection._slow_close.call_count == 15 # 15 initial connections + 15 reconnections assert slow_closing_connection.connect.call_count == 30 assert len(pools) <= 2, len(pools) def test_invalidate(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c_id = c1.connection.id c1.close() c1 = None c1 = p.connect() assert c1.connection.id == c_id c1.invalidate() c1 = None c1 = p.connect() assert c1.connection.id != c_id def test_recreate(self): p = self._queuepool_fixture( reset_on_return=None, pool_size=1, max_overflow=0 ) p2 = p.recreate() assert p2.size() == 1 assert p2._reset_on_return is pool.reset_none assert p2._max_overflow == 0 def test_reconnect(self): """tests reconnect operations at the pool level. 
SA's engine/dialect includes another layer of reconnect support for 'database was lost' errors.""" dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c_id = c1.connection.id c1.close() c1 = None c1 = p.connect() assert c1.connection.id == c_id dbapi.raise_error = True c1.invalidate() c1 = None c1 = p.connect() assert c1.connection.id != c_id def test_detach(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c1.detach() c2 = p.connect() # noqa eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")]) c1_con = c1.connection assert c1_con is not None eq_(c1_con.close.call_count, 0) c1.close() eq_(c1_con.close.call_count, 1) def test_detach_via_invalidate(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c1_con = c1.connection c1.invalidate() assert c1.connection is None eq_(c1_con.close.call_count, 1) c2 = p.connect() assert c2.connection is not c1_con c2_con = c2.connection c2.close() eq_(c2_con.close.call_count, 0) def test_no_double_checkin(self): p = self._queuepool_fixture(pool_size=1) c1 = p.connect() rec = c1._connection_record c1.close() assert_raises_message( Warning, "Double checkin attempted on %s" % rec, rec.checkin ) def test_lifo(self): c1, c2, c3 = Mock(), Mock(), Mock() connections = [c1, c2, c3] def creator(): return connections.pop(0) p = pool.QueuePool(creator, use_lifo=True) pc1 = p.connect() pc2 = p.connect() pc3 = p.connect() pc1.close() pc2.close() pc3.close() for i in range(5): pc1 = p.connect() is_(pc1.connection, c3) pc1.close() pc1 = p.connect() is_(pc1.connection, c3) pc2 = p.connect() is_(pc2.connection, c2) pc2.close() pc3 = p.connect() is_(pc3.connection, c2) pc2 = p.connect() is_(pc2.connection, c1) pc2.close() pc3.close() pc1.close() def test_fifo(self): c1, c2, c3 = Mock(), Mock(), Mock() connections = [c1, c2, c3] def creator(): return connections.pop(0) p = pool.QueuePool(creator) pc1 = p.connect() pc2 = p.connect() pc3 = p.connect() pc1.close() pc2.close() pc3.close() pc1 = p.connect() is_(pc1.connection, c1) pc1.close() pc1 = p.connect() is_(pc1.connection, c2) pc2 = p.connect() is_(pc2.connection, c3) pc2.close() pc3 = p.connect() is_(pc3.connection, c1) pc2 = p.connect() is_(pc2.connection, c3) pc2.close() pc3.close() pc1.close() class ResetOnReturnTest(PoolTestBase): def _fixture(self, **kw): dbapi = Mock() return ( dbapi, pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw), ) def test_plain_rollback(self): dbapi, p = self._fixture(reset_on_return="rollback") c1 = p.connect() c1.close() assert dbapi.connect().rollback.called assert not dbapi.connect().commit.called def test_plain_commit(self): dbapi, p = self._fixture(reset_on_return="commit") c1 = p.connect() c1.close() assert not dbapi.connect().rollback.called assert dbapi.connect().commit.called def test_plain_none(self): dbapi, p = self._fixture(reset_on_return=None) c1 = p.connect() c1.close() assert not dbapi.connect().rollback.called assert not dbapi.connect().commit.called def test_agent_rollback(self): dbapi, p = self._fixture(reset_on_return="rollback") class Agent(object): def __init__(self, conn): self.conn = conn is_active = True def rollback(self): self.conn.special_rollback() def commit(self): self.conn.special_commit() c1 = p.connect() c1._reset_agent = Agent(c1) c1.close() assert dbapi.connect().special_rollback.called assert not dbapi.connect().special_commit.called assert not dbapi.connect().rollback.called assert not 
dbapi.connect().commit.called c1 = p.connect() c1.close() eq_(dbapi.connect().special_rollback.call_count, 1) eq_(dbapi.connect().special_commit.call_count, 0) assert dbapi.connect().rollback.called assert not dbapi.connect().commit.called def test_agent_commit(self): dbapi, p = self._fixture(reset_on_return="commit") class Agent(object): def __init__(self, conn): self.conn = conn is_active = True def rollback(self): self.conn.special_rollback() def commit(self): self.conn.special_commit() c1 = p.connect() c1._reset_agent = Agent(c1) c1.close() assert not dbapi.connect().special_rollback.called assert dbapi.connect().special_commit.called assert not dbapi.connect().rollback.called assert not dbapi.connect().commit.called c1 = p.connect() c1.close() eq_(dbapi.connect().special_rollback.call_count, 0) eq_(dbapi.connect().special_commit.call_count, 1) assert not dbapi.connect().rollback.called assert dbapi.connect().commit.called def test_reset_agent_disconnect(self): dbapi, p = self._fixture(reset_on_return="rollback") class Agent(object): def __init__(self, conn): self.conn = conn def rollback(self): p._invalidate(self.conn) raise Exception("hi") def commit(self): self.conn.commit() c1 = p.connect() c1._reset_agent = Agent(c1) c1.close() # no warning raised. We know it would warn due to # QueuePoolTest.test_no_double_checkin class SingletonThreadPoolTest(PoolTestBase): @testing.requires.threading_with_mock def test_cleanup(self): self._test_cleanup(False) # TODO: the SingletonThreadPool cleanup method # has an unfixed race condition within the "cleanup" system that # leads to this test being off by one connection under load; in any # case, this connection will be closed once it is garbage collected. # this pool is not a production-level pool and is only used for the # SQLite "memory" connection, and is not very useful under actual # multi-threaded conditions # @testing.requires.threading_with_mock # def test_cleanup_no_gc(self): # self._test_cleanup(True) def _test_cleanup(self, strong_refs): """test that the pool's connections are OK after cleanup() has been called.""" dbapi = MockDBAPI() lock = threading.Lock() def creator(): # the mock iterator isn't threadsafe... with lock: return dbapi.connect() p = pool.SingletonThreadPool(creator=creator, pool_size=3) if strong_refs: sr = set() def _conn(): c = p.connect() sr.add(c.connection) return c else: def _conn(): return p.connect() def checkout(): for x in range(10): c = _conn() assert c c.cursor() c.close() time.sleep(0.01) threads = [] for i in range(10): th = threading.Thread(target=checkout) th.start() threads.append(th) for th in threads: th.join(join_timeout) lp = len(p._all_conns) is_true(3 <= lp <= 4) if strong_refs: still_opened = len([c for c in sr if not c.close.call_count]) eq_(still_opened, 3) def test_no_rollback_from_nested_connections(self): dbapi = MockDBAPI() lock = threading.Lock() def creator(): # the mock iterator isn't threadsafe... 
with lock: return dbapi.connect() p = pool.SingletonThreadPool(creator=creator, pool_size=3) c1 = p.connect() mock_conn = c1.connection c2 = p.connect() is_(c1, c2) c2.close() eq_(mock_conn.mock_calls, []) c1.close() eq_(mock_conn.mock_calls, [call.rollback()]) class AssertionPoolTest(PoolTestBase): def test_connect_error(self): dbapi = MockDBAPI() p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db")) c1 = p.connect() # noqa assert_raises(AssertionError, p.connect) def test_connect_multiple(self): dbapi = MockDBAPI() p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db")) c1 = p.connect() c1.close() c2 = p.connect() c2.close() c3 = p.connect() # noqa assert_raises(AssertionError, p.connect) class NullPoolTest(PoolTestBase): def test_reconnect(self): dbapi = MockDBAPI() p = pool.NullPool(creator=lambda: dbapi.connect("foo.db")) c1 = p.connect() c1.close() c1 = None c1 = p.connect() c1.invalidate() c1 = None c1 = p.connect() dbapi.connect.assert_has_calls( [call("foo.db"), call("foo.db")], any_order=True ) class StaticPoolTest(PoolTestBase): def test_recreate(self): dbapi = MockDBAPI() def creator(): return dbapi.connect("foo.db") p = pool.StaticPool(creator) p2 = p.recreate() assert p._creator is p2._creator class CreatorCompatibilityTest(PoolTestBase): def test_creator_callable_outside_noarg(self): e = testing_engine() creator = e.pool._creator try: conn = creator() finally: conn.close() def test_creator_callable_outside_witharg(self): e = testing_engine() creator = e.pool._creator try: conn = creator(Mock()) finally: conn.close() def test_creator_patching_arg_to_noarg(self): e = testing_engine() creator = e.pool._creator try: # the creator is the two-arg form conn = creator(Mock()) finally: conn.close() def mock_create(): return creator() conn = e.connect() conn.invalidate() conn.close() # test that the 'should_wrap_creator' status # will dynamically switch if the _creator is monkeypatched. # patch it with a zero-arg form with patch.object(e.pool, "_creator", mock_create): conn = e.connect() conn.invalidate() conn.close() conn = e.connect() conn.close()
py
1a49cf60fcbae5be441cc32466ffd1f47cca8cbd
class Unbuffered(object):
    def __init__(self, stream):
        self.stream = stream
    def write(self, data):
        self.stream.write(data)
        self.stream.flush()
    def writelines(self, datas):
        self.stream.writelines(datas)
        self.stream.flush()
    def __getattr__(self, attr):
        return getattr(self.stream, attr)

import sys
import os
import fnmatch
from subprocess import call
import subprocess
import shutil  # required for shutil.which() below
import time

sys.stdout = Unbuffered(sys.stdout)

"""
You need to edit the 3 lines below as your versions get upgraded...
"""
fortifyVersion="20.1.0"
jreVersion="1.8.0_202"
# fprUtil="C:\\PROGRA~1\\Fortify\\Fortify_SCA_and_Apps_"+fortifyVersion+"\\bin\\FPRUtility.bat"
fprUtil = shutil.which("FPRUtility.bat")

files = [f for f in os.listdir('.') if os.path.isfile(f)]
for filename in files:
    if fnmatch.fnmatch(filename, '*.fpr'):
        now = time.strftime("%c")
        print ("")
        print ("------------------------------------------------------------")
        print ("Fortify Report filename: " + filename)
        print ("Report start: %s" % now )
        print ("------------------------------------------------------------")
        print ("\r\n")
        print ("Scan Date: ")
        print ("Scanned: files, LOC (Executable)")
        print ("Gross Issues: (0 critical, 0 high, 0 medium, 0 low)")
        print ("Files: ")
        print ("Executable LoC: ")
        print ("Total LoC: ")
        print ("Certified: Results Certification Valid")
        print ("Warnings: ")
        print ("SCA Engine Version: HPE Security Fortify Static Code Analyzer " + fortifyVersion + " (using JRE " + jreVersion + ")")
        print ("\r\n")
        print ("The following are Fortify Issue Severity Counts:")
        print ("-----------------------------------------------")
        print ("CRITICAL: _")
        print ("HIGH: _")
        print ("MEDIUM: _")
        print ("LOW: _")
        print ("FALSE POSITIVE: _")
        print ("Total for all severities: _ Issues")
        print ("\r\n")
        print ("The following are Fortify analyzer Issue Counts by Criticality:")
        print ("--------------------------------------------------------------")
        print ("CRITICAL")
        print ("--------")
        print ("\r\n")
        print ("HIGH")
        print ("--------")
        print ("\r\n")
        print ("MEDIUM")
        print ("--------")
        print ("\r\n")
        print ("LOW")
        print ("--------")
        print ("\r\n")
        print ("FALSE POSITIVE")
        print ("--------------")
        print ("\r\n")
        print ("------------------------------------------------------------")
        print ("Fortify SCA Category Issue Counts for: " + filename)
        print ("------------------------------------------------------------")
        os.system(fprUtil + " -information -categoryIssueCounts -project " + filename)
        print ("")
        print ("------------------------------------------------------------")
        print ("Fortify SCA Analyzer Issue Counts for: " + filename)
        print ("------------------------------------------------------------")
        os.system(fprUtil + " -information -analyzerIssueCounts -project " + filename)
        print ("")
        print ("------------------------------------------------------------")
        print ("Fortify SCA Errors for: " + filename)
        print ("------------------------------------------------------------")
        os.system(fprUtil + " -information -errors -project " + filename)
        print ("")
        print ("")
        print ("------------------------------------------------------------")
        print ("Done with ad-hoc reporting on: " + filename)
        print ("------------------------------------------------------------")
        now = time.strftime("%c")
        print ("Report end: %s" % now )
        print ("------------------------------------------------------------")
        print ("")
py
1a49d037838573b14ef0b2f49033a7c67fe1b45c
# -*- coding: utf-8 -*- # Authors: Mark Wronkiewicz <[email protected]> # Yousra Bekhti <[email protected]> # Eric Larson <[email protected]> # # License: BSD-3-Clause from collections.abc import Iterable import numpy as np from ..event import _get_stim_channel from .._ola import _Interp2 from ..io.pick import (pick_types, pick_info, pick_channels, pick_channels_forward) from ..cov import make_ad_hoc_cov, read_cov, Covariance from ..bem import fit_sphere_to_headshape, make_sphere_model, read_bem_solution from ..io import RawArray, BaseRaw, Info from ..chpi import (read_head_pos, head_pos_to_trans_rot_t, get_chpi_info, _get_hpi_initial_fit) from ..io.constants import FIFF from ..forward import (_magnetic_dipole_field_vec, _merge_meg_eeg_fwds, _stc_src_sel, convert_forward_solution, _prepare_for_forward, _transform_orig_meg_coils, _compute_forwards, _to_forward_dict, restrict_forward_to_stc, _prep_meg_channels) from ..transforms import _get_trans, transform_surface_to from ..source_space import (_ensure_src, _set_source_space_vertices, setup_volume_source_space) from ..source_estimate import _BaseSourceEstimate from ..surface import _CheckInside from ..utils import (logger, verbose, check_random_state, _pl, _validate_type, _check_preload) from ..parallel import check_n_jobs from .source import SourceSimulator def _check_cov(info, cov): """Check that the user provided a valid covariance matrix for the noise.""" if isinstance(cov, Covariance) or cov is None: pass elif isinstance(cov, dict): cov = make_ad_hoc_cov(info, cov, verbose=False) elif isinstance(cov, str): if cov == 'simple': cov = make_ad_hoc_cov(info, None, verbose=False) else: cov = read_cov(cov, verbose=False) else: raise TypeError('Covariance matrix type not recognized. Valid input ' 'types are: instance of Covariance, dict, str, None. ' ', got %s' % (cov,)) return cov def _check_stc_iterable(stc, info): # 1. Check that our STC is iterable (or convert it to one using cycle) # 2. Do first iter so we can get the vertex subselection # 3. 
Get the list of verts, which must stay the same across iterations if isinstance(stc, _BaseSourceEstimate): stc = [stc] _validate_type(stc, Iterable, 'SourceEstimate, tuple, or iterable') stc_enum = enumerate(stc) del stc try: stc_counted = next(stc_enum) except StopIteration: raise RuntimeError('Iterable did not provide stc[0]') _, _, verts = _stc_data_event(stc_counted, 1, info['sfreq']) return stc_enum, stc_counted, verts def _log_ch(start, info, ch): """Log channel information.""" if ch is not None: extra, just, ch = ' stored on channel:', 50, info['ch_names'][ch] else: extra, just, ch = ' not stored', 0, '' logger.info((start + extra).ljust(just) + ch) def _check_head_pos(head_pos, info, first_samp, times=None): if head_pos is None: # use pos from info['dev_head_t'] head_pos = dict() if isinstance(head_pos, str): # can be a head pos file head_pos = read_head_pos(head_pos) if isinstance(head_pos, np.ndarray): # can be head_pos quats head_pos = head_pos_to_trans_rot_t(head_pos) if isinstance(head_pos, tuple): # can be quats converted to trans, rot, t transs, rots, ts = head_pos first_time = first_samp / info['sfreq'] ts = ts - first_time # MF files need reref dev_head_ts = [np.r_[np.c_[r, t[:, np.newaxis]], [[0, 0, 0, 1]]] for r, t in zip(rots, transs)] del transs, rots elif isinstance(head_pos, dict): ts = np.array(list(head_pos.keys()), float) ts.sort() dev_head_ts = [head_pos[float(tt)] for tt in ts] else: raise TypeError('unknown head_pos type %s' % type(head_pos)) bad = ts < 0 if bad.any(): raise RuntimeError('All position times must be >= 0, found %s/%s' '< 0' % (bad.sum(), len(bad))) if times is not None: bad = ts > times[-1] if bad.any(): raise RuntimeError('All position times must be <= t_end (%0.1f ' 'sec), found %s/%s bad values (is this a split ' 'file?)' % (times[-1], bad.sum(), len(bad))) # If it starts close to zero, make it zero (else unique(offset) fails) if len(ts) > 0 and ts[0] < (0.5 / info['sfreq']): ts[0] = 0. # If it doesn't start at zero, insert one at t=0 elif len(ts) == 0 or ts[0] > 0: ts = np.r_[[0.], ts] dev_head_ts.insert(0, info['dev_head_t']['trans']) dev_head_ts = [{'trans': d, 'to': info['dev_head_t']['to'], 'from': info['dev_head_t']['from']} for d in dev_head_ts] offsets = np.round(ts * info['sfreq']).astype(int) assert np.array_equal(offsets, np.unique(offsets)) assert len(offsets) == len(dev_head_ts) offsets = list(offsets) return dev_head_ts, offsets @verbose def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None, mindist=1.0, interp='cos2', n_jobs=1, use_cps=True, forward=None, first_samp=0, max_iter=10000, verbose=None): u"""Simulate raw data. Head movements can optionally be simulated using the ``head_pos`` parameter. Parameters ---------- %(info_not_none)s Used for simulation. .. versionchanged:: 0.18 Support for :class:`mne.Info`. stc : iterable | SourceEstimate | SourceSimulator The source estimates to use to simulate data. Each must have the same sample rate as the raw data, and the vertices of all stcs in the iterable must match. Each entry in the iterable can also be a tuple of ``(SourceEstimate, ndarray)`` to allow specifying the stim channel (e.g., STI001) data accompany the source estimate. See Notes for details. .. versionchanged:: 0.18 Support for tuple, iterable of tuple or `~mne.SourceEstimate`, or `~mne.simulation.SourceSimulator`. trans : dict | str | None Either a transformation filename (usually made using mne_analyze) or an info dict (usually opened using read_trans()). 
If string, an ending of ``.fif`` or ``.fif.gz`` will be assumed to be in FIF format, any other ending will be assumed to be a text file with a 4x4 transformation matrix (like the ``--trans`` MNE-C option). If trans is None, an identity transform will be used. src : str | instance of SourceSpaces | None Source space corresponding to the stc. If string, should be a source space filename. Can also be an instance of loaded or generated SourceSpaces. Can be None if ``forward`` is provided. bem : str | dict | None BEM solution corresponding to the stc. If string, should be a BEM solution filename (e.g., "sample-5120-5120-5120-bem-sol.fif"). Can be None if ``forward`` is provided. %(head_pos)s See for example :footcite:`LarsonTaulu2017`. mindist : float Minimum distance between sources and the inner skull boundary to use during forward calculation. %(interp)s %(n_jobs)s %(use_cps)s forward : instance of Forward | None The forward operator to use. If None (default) it will be computed using ``bem``, ``trans``, and ``src``. If not None, ``bem``, ``trans``, and ``src`` are ignored. .. versionadded:: 0.17 first_samp : int The first_samp property in the output Raw instance. .. versionadded:: 0.18 max_iter : int The maximum number of STC iterations to allow. This is a sanity parameter to prevent accidental blowups. .. versionadded:: 0.18 %(verbose)s Returns ------- raw : instance of Raw The simulated raw file. See Also -------- mne.chpi.read_head_pos add_chpi add_noise add_ecg add_eog simulate_evoked simulate_stc simulate_sparse_stc Notes ----- **Stim channel encoding** By default, the stimulus channel will have the head position number (starting at 1) stored in the trigger channel (if available) at the t=0 point in each repetition of the ``stc``. If ``stc`` is a tuple of ``(SourceEstimate, ndarray)`` the array values will be placed in the stim channel aligned with the :class:`mne.SourceEstimate`. **Data simulation** In the most advanced case where ``stc`` is an iterable of tuples the output will be concatenated in time as: .. table:: Data alignment and stim channel encoding +---------+--------------------------+--------------------------+---------+ | Channel | Data | +=========+==========================+==========================+=========+ | M/EEG | ``fwd @ stc[0][0].data`` | ``fwd @ stc[1][0].data`` | ``...`` | +---------+--------------------------+--------------------------+---------+ | STIM | ``stc[0][1]`` | ``stc[1][1]`` | ``...`` | +---------+--------------------------+--------------------------+---------+ | | *time →* | +---------+--------------------------+--------------------------+---------+ .. versionadded:: 0.10.0 References ---------- .. 
footbibliography:: """ # noqa: E501 _validate_type(info, Info, 'info') raw_verbose = verbose if len(pick_types(info, meg=False, stim=True)) == 0: event_ch = None else: event_ch = pick_channels(info['ch_names'], _get_stim_channel(None, info))[0] n_jobs = check_n_jobs(n_jobs) if forward is not None: if any(x is not None for x in (trans, src, bem, head_pos)): raise ValueError('If forward is not None then trans, src, bem, ' 'and head_pos must all be None') if not np.allclose(forward['info']['dev_head_t']['trans'], info['dev_head_t']['trans'], atol=1e-6): raise ValueError('The forward meg<->head transform ' 'forward["info"]["dev_head_t"] does not match ' 'the one in raw.info["dev_head_t"]') src = forward['src'] dev_head_ts, offsets = _check_head_pos(head_pos, info, first_samp, None) src = _ensure_src(src, verbose=False) if isinstance(bem, str): bem = read_bem_solution(bem, verbose=False) # Extract necessary info meeg_picks = pick_types(info, meg=True, eeg=True, exclude=[]) logger.info('Setting up raw simulation: %s position%s, "%s" interpolation' % (len(dev_head_ts), _pl(dev_head_ts), interp)) if isinstance(stc, SourceSimulator) and stc.first_samp != first_samp: logger.info('SourceSimulator first_samp does not match argument.') stc_enum, stc_counted, verts = _check_stc_iterable(stc, info) if forward is not None: forward = restrict_forward_to_stc(forward, verts) src = forward['src'] else: _stc_src_sel(src, verts, on_missing='warn', extra='') src = _set_source_space_vertices(src.copy(), verts) # array used to store result raw_datas = list() _log_ch('Event information', info, event_ch) # don't process these any more if no MEG present n = 1 get_fwd = _SimForwards( dev_head_ts, offsets, info, trans, src, bem, mindist, n_jobs, meeg_picks, forward, use_cps) interper = _Interp2(offsets, get_fwd, interp) this_start = 0 for n in range(max_iter): if isinstance(stc_counted[1], (list, tuple)): this_n = stc_counted[1][0].data.shape[1] else: this_n = stc_counted[1].data.shape[1] this_stop = this_start + this_n logger.info(' Interval %0.3f-%0.3f sec' % (this_start / info['sfreq'], this_stop / info['sfreq'])) n_doing = this_stop - this_start assert n_doing > 0 this_data = np.zeros((len(info['ch_names']), n_doing)) raw_datas.append(this_data) # Stim channel fwd, fi = interper.feed(this_stop - this_start) fi = fi[0] stc_data, stim_data, _ = _stc_data_event( stc_counted, fi, info['sfreq'], get_fwd.src, None if n == 0 else verts) if event_ch is not None: this_data[event_ch, :] = stim_data[:n_doing] this_data[meeg_picks] = np.einsum('svt,vt->st', fwd, stc_data) try: stc_counted = next(stc_enum) except StopIteration: logger.info(' %d STC iteration%s provided' % (n + 1, _pl(n + 1))) break del fwd else: raise RuntimeError('Maximum number of STC iterations (%d) ' 'exceeded' % (n,)) raw_data = np.concatenate(raw_datas, axis=-1) raw = RawArray(raw_data, info, first_samp=first_samp, verbose=False) raw.set_annotations(raw.annotations) raw.verbose = raw_verbose logger.info('Done') return raw @verbose def add_eog(raw, head_pos=None, interp='cos2', n_jobs=1, random_state=None, verbose=None): """Add blink noise to raw data. Parameters ---------- raw : instance of Raw The raw instance to modify. %(head_pos)s %(interp)s %(n_jobs)s %(random_state)s The random generator state used for blink, ECG, and sensor noise randomization. %(verbose)s Returns ------- raw : instance of Raw The instance, modified in place. See Also -------- add_chpi add_ecg add_noise simulate_raw Notes ----- The blink artifacts are generated by: 1. 
Random activation times are drawn from an inhomogeneous poisson process whose blink rate oscillates between 4.5 blinks/minute and 17 blinks/minute based on the low (reading) and high (resting) blink rates from :footcite:`BentivoglioEtAl1997`. 2. The activation kernel is a 250 ms Hanning window. 3. Two activated dipoles are located in the z=0 plane (in head coordinates) at ±30 degrees away from the y axis (nasion). 4. Activations affect MEG and EEG channels. The scale-factor of the activation function was chosen based on visual inspection to yield amplitudes generally consistent with those seen in experimental data. Noisy versions of the activation will be stored in the first EOG channel in the raw instance, if it exists. References ---------- .. footbibliography:: """ return _add_exg(raw, 'blink', head_pos, interp, n_jobs, random_state) @verbose def add_ecg(raw, head_pos=None, interp='cos2', n_jobs=1, random_state=None, verbose=None): """Add ECG noise to raw data. Parameters ---------- raw : instance of Raw The raw instance to modify. %(head_pos)s %(interp)s %(n_jobs)s %(random_state)s The random generator state used for blink, ECG, and sensor noise randomization. %(verbose)s Returns ------- raw : instance of Raw The instance, modified in place. See Also -------- add_chpi add_eog add_noise simulate_raw Notes ----- The ECG artifacts are generated by: 1. Random inter-beat intervals are drawn from a uniform distribution of times corresponding to 40 and 80 beats per minute. 2. The activation function is the sum of three Hanning windows with varying durations and scales to make a more complex waveform. 3. The activated dipole is located one (estimated) head radius to the left (-x) of head center and three head radii below (+z) head center; this dipole is oriented in the +x direction. 4. Activations only affect MEG channels. The scale-factor of the activation function was chosen based on visual inspection to yield amplitudes generally consistent with those seen in experimental data. Noisy versions of the activation will be stored in the first EOG channel in the raw instance, if it exists. .. versionadded:: 0.18 """ return _add_exg(raw, 'ecg', head_pos, interp, n_jobs, random_state) def _add_exg(raw, kind, head_pos, interp, n_jobs, random_state): assert isinstance(kind, str) and kind in ('ecg', 'blink') _validate_type(raw, BaseRaw, 'raw') _check_preload(raw, 'Adding %s noise ' % (kind,)) rng = check_random_state(random_state) info, times, first_samp = raw.info, raw.times, raw.first_samp data = raw._data meg_picks = pick_types(info, meg=True, eeg=False, exclude=()) meeg_picks = pick_types(info, meg=True, eeg=True, exclude=()) R, r0 = fit_sphere_to_headshape(info, units='m', verbose=False)[:2] bem = make_sphere_model(r0, head_radius=R, relative_radii=(0.97, 0.98, 0.99, 1.), sigmas=(0.33, 1.0, 0.004, 0.33), verbose=False) trans = None dev_head_ts, offsets = _check_head_pos(head_pos, info, first_samp, times) if kind == 'blink': # place dipoles at 45 degree angles in z=0 plane exg_rr = np.array([[np.cos(np.pi / 3.), np.sin(np.pi / 3.), 0.], [-np.cos(np.pi / 3.), np.sin(np.pi / 3), 0.]]) exg_rr /= np.sqrt(np.sum(exg_rr * exg_rr, axis=1, keepdims=True)) exg_rr *= 0.96 * R exg_rr += r0 # oriented upward nn = np.array([[0., 0., 1.], [0., 0., 1.]]) # Blink times drawn from an inhomogeneous poisson process # by 1) creating the rate and 2) pulling random numbers blink_rate = (1 + np.cos(2 * np.pi * 1. / 60. * times)) / 2. blink_rate *= 12.5 / 60. blink_rate += 4.5 / 60. 
blink_data = rng.uniform(size=len(times)) < blink_rate / info['sfreq'] blink_data = blink_data * (rng.uniform(size=len(times)) + 0.5) # amps # Activation kernel is a simple hanning window blink_kernel = np.hanning(int(0.25 * info['sfreq'])) exg_data = np.convolve(blink_data, blink_kernel, 'same')[np.newaxis, :] * 1e-7 # Add rescaled noisy data to EOG ch ch = pick_types(info, meg=False, eeg=False, eog=True) picks = meeg_picks del blink_kernel, blink_rate, blink_data else: if len(meg_picks) == 0: raise RuntimeError('Can only add ECG artifacts if MEG data ' 'channels are present') exg_rr = np.array([[-R, 0, -3 * R]]) max_beats = int(np.ceil(times[-1] * 80. / 60.)) # activation times with intervals drawn from a uniform distribution # based on activation rates between 40 and 80 beats per minute cardiac_idx = np.cumsum(rng.uniform(60. / 80., 60. / 40., max_beats) * info['sfreq']).astype(int) cardiac_idx = cardiac_idx[cardiac_idx < len(times)] cardiac_data = np.zeros(len(times)) cardiac_data[cardiac_idx] = 1 # kernel is the sum of three hanning windows cardiac_kernel = np.concatenate([ 2 * np.hanning(int(0.04 * info['sfreq'])), -0.3 * np.hanning(int(0.05 * info['sfreq'])), 0.2 * np.hanning(int(0.26 * info['sfreq']))], axis=-1) exg_data = np.convolve(cardiac_data, cardiac_kernel, 'same')[np.newaxis, :] * 15e-8 # Add rescaled noisy data to ECG ch ch = pick_types(info, meg=False, eeg=False, ecg=True) picks = meg_picks del cardiac_data, cardiac_kernel, max_beats, cardiac_idx nn = np.zeros_like(exg_rr) nn[:, 0] = 1 # arbitrarily rightward del meg_picks, meeg_picks noise = rng.standard_normal(exg_data.shape[1]) * 5e-6 if len(ch) >= 1: ch = ch[-1] data[ch, :] = exg_data * 1e3 + noise else: ch = None src = setup_volume_source_space(pos=dict(rr=exg_rr, nn=nn), sphere_units='mm') _log_ch('%s simulated and trace' % kind, info, ch) del ch, nn, noise used = np.zeros(len(raw.times), bool) get_fwd = _SimForwards( dev_head_ts, offsets, info, trans, src, bem, 0.005, n_jobs, picks) interper = _Interp2(offsets, get_fwd, interp) proc_lims = np.concatenate([np.arange(0, len(used), 10000), [len(used)]]) for start, stop in zip(proc_lims[:-1], proc_lims[1:]): fwd, _ = interper.feed(stop - start) data[picks, start:stop] += np.einsum( 'svt,vt->st', fwd, exg_data[:, start:stop]) assert not used[start:stop].any() used[start:stop] = True assert used.all() @verbose def add_chpi(raw, head_pos=None, interp='cos2', n_jobs=1, verbose=None): """Add cHPI activations to raw data. Parameters ---------- raw : instance of Raw The raw instance to be modified. %(head_pos)s %(interp)s %(n_jobs)s %(verbose)s Returns ------- raw : instance of Raw The instance, modified in place. Notes ----- .. 
versionadded:: 0.18 """ _validate_type(raw, BaseRaw, 'raw') _check_preload(raw, 'Adding cHPI signals ') info, first_samp, times = raw.info, raw.first_samp, raw.times meg_picks = pick_types(info, meg=True, eeg=False, exclude=[]) # for CHPI if len(meg_picks) == 0: raise RuntimeError('Cannot add cHPI if no MEG picks are present') dev_head_ts, offsets = _check_head_pos(head_pos, info, first_samp, times) hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(info, on_missing='raise') hpi_rrs = _get_hpi_initial_fit(info, verbose='error') hpi_nns = hpi_rrs / np.sqrt(np.sum(hpi_rrs * hpi_rrs, axis=1))[:, np.newaxis] # turn on cHPI in file data = raw._data data[hpi_pick, :] = hpi_ons.sum() _log_ch('cHPI status bits enbled and', info, hpi_pick) sinusoids = 70e-9 * np.sin(2 * np.pi * hpi_freqs[:, np.newaxis] * (np.arange(len(times)) / info['sfreq'])) info = pick_info(info, meg_picks) with info._unlock(): info.update(projs=[], bads=[]) # Ensure no 'projs' or 'bads' megcoils, _, _, _ = _prep_meg_channels(info, ignore_ref=False) used = np.zeros(len(raw.times), bool) dev_head_ts.append(dev_head_ts[-1]) # ZOH after time ends get_fwd = _HPIForwards(offsets, dev_head_ts, megcoils, hpi_rrs, hpi_nns) interper = _Interp2(offsets, get_fwd, interp) lims = np.concatenate([offsets, [len(raw.times)]]) for start, stop in zip(lims[:-1], lims[1:]): fwd, = interper.feed(stop - start) data[meg_picks, start:stop] += np.einsum( 'svt,vt->st', fwd, sinusoids[:, start:stop]) assert not used[start:stop].any() used[start:stop] = True assert used.all() return raw class _HPIForwards(object): def __init__(self, offsets, dev_head_ts, megcoils, hpi_rrs, hpi_nns): self.offsets = offsets self.dev_head_ts = dev_head_ts self.hpi_rrs = hpi_rrs self.hpi_nns = hpi_nns self.megcoils = megcoils self.idx = 0 def __call__(self, offset): assert offset == self.offsets[self.idx] _transform_orig_meg_coils(self.megcoils, self.dev_head_ts[self.idx]) fwd = _magnetic_dipole_field_vec(self.hpi_rrs, self.megcoils).T # align cHPI magnetic dipoles in approx. radial direction fwd = np.array([np.dot(fwd[:, 3 * ii:3 * (ii + 1)], self.hpi_nns[ii]) for ii in range(len(self.hpi_rrs))]).T self.idx += 1 return (fwd,) def _stc_data_event(stc_counted, head_idx, sfreq, src=None, verts=None): stc_idx, stc = stc_counted if isinstance(stc, (list, tuple)): if len(stc) != 2: raise ValueError('stc, if tuple, must be length 2, got %s' % (len(stc),)) stc, stim_data = stc else: stim_data = None _validate_type(stc, _BaseSourceEstimate, 'stc', 'SourceEstimate or tuple with first entry SourceEstimate') # Convert event data if stim_data is None: stim_data = np.zeros(len(stc.times), int) stim_data[np.argmin(np.abs(stc.times))] = head_idx del head_idx _validate_type(stim_data, np.ndarray, 'stim_data') if stim_data.dtype.kind != 'i': raise ValueError('stim_data in a stc tuple must be an integer ndarray,' ' got dtype %s' % (stim_data.dtype,)) if stim_data.shape != (len(stc.times),): raise ValueError('event data had shape %s but needed to be (%s,) to' 'match stc' % (stim_data.shape, len(stc.times))) # Validate STC if not np.allclose(sfreq, 1. / stc.tstep): raise ValueError('stc and info must have same sample rate, ' 'got %s and %s' % (1. 
/ stc.tstep, sfreq)) if len(stc.times) <= 2: # to ensure event encoding works raise ValueError('stc must have at least three time points, got %s' % (len(stc.times),)) verts_ = stc.vertices if verts is None: assert stc_idx == 0 else: if len(verts) != len(verts_) or not all( np.array_equal(a, b) for a, b in zip(verts, verts_)): raise RuntimeError('Vertex mismatch for stc[%d], ' 'all stc.vertices must match' % (stc_idx,)) stc_data = stc.data if src is None: assert stc_idx == 0 else: # on_missing depends on whether or not this is the first iteration on_missing = 'warn' if verts is None else 'ignore' _, stc_sel, _ = _stc_src_sel(src, stc, on_missing=on_missing) stc_data = stc_data[stc_sel] return stc_data, stim_data, verts_ class _SimForwards(object): def __init__(self, dev_head_ts, offsets, info, trans, src, bem, mindist, n_jobs, meeg_picks, forward=None, use_cps=True): self.idx = 0 self.offsets = offsets self.use_cps = use_cps self.iter = iter(_iter_forward_solutions( info, trans, src, bem, dev_head_ts, mindist, n_jobs, forward, meeg_picks)) def __call__(self, offset): assert self.offsets[self.idx] == offset self.idx += 1 fwd = next(self.iter) self.src = fwd['src'] # XXX eventually we could speed this up by allowing the forward # solution code to only compute the normal direction convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=self.use_cps, copy=False, verbose=False) return fwd['sol']['data'], np.array(self.idx, float) def _iter_forward_solutions(info, trans, src, bem, dev_head_ts, mindist, n_jobs, forward, picks): """Calculate a forward solution for a subject.""" logger.info('Setting up forward solutions') info = pick_info(info, picks) with info._unlock(): info.update(projs=[], bads=[]) # Ensure no 'projs' or 'bads' mri_head_t, trans = _get_trans(trans) megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \ update_kwargs, bem = _prepare_for_forward( src, mri_head_t, info, bem, mindist, n_jobs, allow_bem_none=True, verbose=False) del (src, mindist) if forward is None: eegfwd = _compute_forwards(rr, bem, [eegels], [None], [None], ['eeg'], n_jobs, verbose=False)[0] eegfwd = _to_forward_dict(eegfwd, eegnames) else: if len(eegnames) > 0: eegfwd = pick_channels_forward(forward, eegnames, verbose=False) else: eegfwd = None # short circuit here if there are no MEG channels (don't need to iterate) if len(pick_types(info, meg=True)) == 0: eegfwd.update(**update_kwargs) for _ in dev_head_ts: yield eegfwd yield eegfwd return coord_frame = FIFF.FIFFV_COORD_HEAD if bem is not None and not bem['is_sphere']: idx = np.where(np.array([s['id'] for s in bem['surfs']]) == FIFF.FIFFV_BEM_SURF_ID_BRAIN)[0] assert len(idx) == 1 # make a copy so it isn't mangled in use bem_surf = transform_surface_to(bem['surfs'][idx[0]], coord_frame, mri_head_t, copy=True) for ti, dev_head_t in enumerate(dev_head_ts): # Could be *slightly* more efficient not to do this N times, # but the cost here is tiny compared to actual fwd calculation logger.info('Computing gain matrix for transform #%s/%s' % (ti + 1, len(dev_head_ts))) _transform_orig_meg_coils(megcoils, dev_head_t) _transform_orig_meg_coils(compcoils, dev_head_t) # Make sure our sensors are all outside our BEM coil_rr = np.array([coil['r0'] for coil in megcoils]) # Compute forward if forward is None: if not bem['is_sphere']: outside = ~_CheckInside(bem_surf)(coil_rr, n_jobs, verbose=False) elif bem.radius is not None: d = coil_rr - bem['r0'] outside = np.sqrt(np.sum(d * d, axis=1)) > bem.radius else: # only r0 provided outside = 
np.ones(len(coil_rr), bool) if not outside.all(): raise RuntimeError('%s MEG sensors collided with inner skull ' 'surface for transform %s' % (np.sum(~outside), ti)) megfwd = _compute_forwards(rr, bem, [megcoils], [compcoils], [meg_info], ['meg'], n_jobs, verbose=False)[0] megfwd = _to_forward_dict(megfwd, megnames) else: megfwd = pick_channels_forward(forward, megnames, verbose=False) fwd = _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=False) fwd.update(**update_kwargs) yield fwd # need an extra one to fill last buffer yield fwd
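# A usage sketch of the public helpers defined above (simulate_raw, add_ecg, add_eog),
# assuming the MNE "sample" dataset is available locally; the file names below follow
# the standard sample-data layout and are not taken from this module.
import os.path as op
import numpy as np
import mne
from mne.simulation import simulate_raw, simulate_sparse_stc, add_ecg, add_eog

meg_dir = op.join(str(mne.datasets.sample.data_path()), 'MEG', 'sample')
info = mne.io.read_info(op.join(meg_dir, 'sample_audvis_raw.fif'))
fwd = mne.read_forward_solution(op.join(meg_dir, 'sample_audvis-meg-eeg-oct-6-fwd.fif'))

# One random dipole, 1 s of activity, sampled at the recording's sampling rate.
times = np.arange(0, 1, 1. / info['sfreq'])
stc = simulate_sparse_stc(fwd['src'], n_dipoles=1, times=times, random_state=0)

raw_sim = simulate_raw(info, stc, forward=fwd, verbose=False)  # trans/src/bem not needed
add_ecg(raw_sim, random_state=0)  # requires MEG channels in `info`
add_eog(raw_sim, random_state=0)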
py
1a49d0c75c0e17fc48556d7ff7e21f8903128c01
import copy import json import logging import os import torch from torch.utils.data import TensorDataset from utils import get_intent_labels, get_slot_labels logger = logging.getLogger(__name__) class InputExample(object): """ A single training/test example for simple sequence classification. Args: guid: Unique id for the example. words: list. The words of the sequence. intent_label: (Optional) string. The intent label of the example. slot_labels: (Optional) list. The slot labels of the example. """ def __init__(self, guid, words, intent_label=None, slot_labels=None): self.guid = guid self.words = words self.intent_label = intent_label self.slot_labels = slot_labels def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, attention_mask, token_type_ids, intent_label_id, slot_labels_ids): self.input_ids = input_ids self.attention_mask = attention_mask self.token_type_ids = token_type_ids self.intent_label_id = intent_label_id self.slot_labels_ids = slot_labels_ids def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class JointProcessor(object): """Processor for the JointBERT data set """ def __init__(self, args): self.args = args self.intent_labels = get_intent_labels(args) self.slot_labels = get_slot_labels(args) self.input_text_file = "seq.in" self.intent_label_file = "label" self.slot_labels_file = "seq.out" @classmethod def _read_file(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: lines = [] for line in f: lines.append(line.strip()) return lines def _create_examples(self, texts, intents, slots, set_type): """Creates examples for the training and dev sets.""" examples = [] for i, (text, intent, slot) in enumerate(zip(texts, intents, slots)): guid = "%s-%s" % (set_type, i) # 1. input_text words = text.split() # Some are spaced twice # 2. intent intent_label = ( self.intent_labels.index(intent) if intent in self.intent_labels else self.intent_labels.index("UNK") ) # 3. 
slot slot_labels = [] for s in slot.split(): slot_labels.append( self.slot_labels.index(s) if s in self.slot_labels else self.slot_labels.index("UNK") ) assert len(words) == len(slot_labels) examples.append(InputExample(guid=guid, words=words, intent_label=intent_label, slot_labels=slot_labels)) return examples def get_examples(self, mode): """ Args: mode: train, dev, test """ data_path = os.path.join(self.args.data_dir, self.args.token_level, mode) logger.info("LOOKING AT {}".format(data_path)) return self._create_examples( texts=self._read_file(os.path.join(data_path, self.input_text_file)), intents=self._read_file(os.path.join(data_path, self.intent_label_file)), slots=self._read_file(os.path.join(data_path, self.slot_labels_file)), set_type=mode, ) processors = {"syllable-level": JointProcessor, "word-level": JointProcessor} def convert_examples_to_features( examples, max_seq_len, tokenizer, pad_token_label_id=-100, cls_token_segment_id=0, pad_token_segment_id=0, sequence_a_segment_id=0, mask_padding_with_zero=True, ): # Setting based on the current model type cls_token = tokenizer.cls_token sep_token = tokenizer.sep_token unk_token = tokenizer.unk_token pad_token_id = tokenizer.pad_token_id features = [] for (ex_index, example) in enumerate(examples): if ex_index % 5000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) # Tokenize word by word (for NER) tokens = [] slot_labels_ids = [] for word, slot_label in zip(example.words, example.slot_labels): word_tokens = tokenizer.tokenize(word) if not word_tokens: word_tokens = [unk_token] # For handling the bad-encoded word tokens.extend(word_tokens) # Use the real label id for the first token of the word, and padding ids for the remaining tokens slot_labels_ids.extend([int(slot_label)] + [pad_token_label_id] * (len(word_tokens) - 1)) # Account for [CLS] and [SEP] special_tokens_count = 2 if len(tokens) > max_seq_len - special_tokens_count: tokens = tokens[: (max_seq_len - special_tokens_count)] slot_labels_ids = slot_labels_ids[: (max_seq_len - special_tokens_count)] # Add [SEP] token tokens += [sep_token] slot_labels_ids += [pad_token_label_id] token_type_ids = [sequence_a_segment_id] * len(tokens) # Add [CLS] token tokens = [cls_token] + tokens slot_labels_ids = [pad_token_label_id] + slot_labels_ids token_type_ids = [cls_token_segment_id] + token_type_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_len - len(input_ids) input_ids = input_ids + ([pad_token_id] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length) slot_labels_ids = slot_labels_ids + ([pad_token_label_id] * padding_length) assert len(input_ids) == max_seq_len, "Error with input length {} vs {}".format(len(input_ids), max_seq_len) assert len(attention_mask) == max_seq_len, "Error with attention mask length {} vs {}".format( len(attention_mask), max_seq_len ) assert len(token_type_ids) == max_seq_len, "Error with token type length {} vs {}".format( len(token_type_ids), max_seq_len ) assert len(slot_labels_ids) == max_seq_len, "Error with slot labels length {} vs {}".format( len(slot_labels_ids), max_seq_len ) intent_label_id = int(example.intent_label) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % example.guid) logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask])) logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids])) logger.info("intent_label: %s (id = %d)" % (example.intent_label, intent_label_id)) logger.info("slot_labels: %s" % " ".join([str(x) for x in slot_labels_ids])) features.append( InputFeatures( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, intent_label_id=intent_label_id, slot_labels_ids=slot_labels_ids, ) ) return features def load_and_cache_examples(args, tokenizer, mode): processor = processors[args.token_level](args) # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( mode, args.token_level, list(filter(None, args.model_name_or_path.split("/"))).pop(), args.max_seq_len ), ) if os.path.exists(cached_features_file): logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: # Load data features from dataset file logger.info("Creating features from dataset file at %s", args.data_dir) if mode == "train": examples = processor.get_examples("train") elif mode == "dev": examples = processor.get_examples("dev") elif mode == "test": examples = processor.get_examples("test") else: raise Exception("For mode, Only train, dev, test is available") # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later pad_token_label_id = args.ignore_index features = convert_examples_to_features( examples, args.max_seq_len, tokenizer, pad_token_label_id=pad_token_label_id ) logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) all_intent_label_ids = torch.tensor([f.intent_label_id for f in features], dtype=torch.long) all_slot_labels_ids = torch.tensor([f.slot_labels_ids for f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_mask, all_token_type_ids, all_intent_label_ids, all_slot_labels_ids ) return dataset
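# A minimal driver sketch (not part of this module); the data-directory layout
# (<data_dir>/<token_level>/train/{seq.in,label,seq.out}) and the model name are
# assumptions, and utils.get_intent_labels / get_slot_labels must resolve for the same args.
from argparse import Namespace
from transformers import AutoTokenizer

args = Namespace(
    data_dir="data",
    token_level="word-level",              # key into `processors`
    model_name_or_path="bert-base-multilingual-cased",  # any HF checkpoint with CLS/SEP tokens
    max_seq_len=50,
    ignore_index=-100,                     # pad label id ignored by the loss
)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_dataset = load_and_cache_examples(args, tokenizer, mode="train")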
py
1a49d135af2ebc46e0b642bf1a09396a08804b59
from __future__ import division import numpy as np import chainer from chainer.functions import dropout from chainer.functions import max_pooling_2d from chainer.functions import relu from chainer.functions import softmax from chainer.initializers import constant from chainer.initializers import normal from chainer.links import Linear from chainercv.links.connection.conv_2d_activ import Conv2DActiv from chainercv.links.model.pickable_sequential_chain import \ PickableSequentialChain from chainercv import utils # RGB order _imagenet_mean = np.array( [123.68, 116.779, 103.939], dtype=np.float32)[:, np.newaxis, np.newaxis] class VGG16(PickableSequentialChain): """VGG-16 Network. This is a pickable sequential link. The network can choose output layers from set of all intermediate layers. The attribute :obj:`pick` is the names of the layers that are going to be picked by :meth:`forward`. The attribute :obj:`layer_names` is the names of all layers that can be picked. Examples: >>> model = VGG16() # By default, forward returns a probability score (after Softmax). >>> prob = model(imgs) >>> model.pick = 'conv5_3' # This is layer conv5_3 (after ReLU). >>> conv5_3 = model(imgs) >>> model.pick = ['conv5_3', 'fc6'] >>> # These are layers conv5_3 (after ReLU) and fc6 (before ReLU). >>> conv5_3, fc6 = model(imgs) .. seealso:: :class:`chainercv.links.model.PickableSequentialChain` When :obj:`pretrained_model` is the path of a pre-trained chainer model serialized as a :obj:`.npz` file in the constructor, this chain model automatically initializes all the parameters with it. When a string in the prespecified set is provided, a pretrained model is loaded from weights distributed on the Internet. The list of pretrained models supported are as follows: * :obj:`imagenet`: Loads weights trained with ImageNet and distributed \ at `Model Zoo \ <https://github.com/BVLC/caffe/wiki/Model-Zoo>`_. Args: n_class (int): The number of classes. If :obj:`None`, the default values are used. If a supported pretrained model is used, the number of classes used to train the pretrained model is used. Otherwise, the number of classes in ILSVRC 2012 dataset is used. pretrained_model (string): The destination of the pre-trained chainer model serialized as a :obj:`.npz` file. If this is one of the strings described above, it automatically loads weights stored under a directory :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`, where :obj:`$CHAINER_DATASET_ROOT` is set as :obj:`$HOME/.chainer/dataset` unless you specify another value by modifying the environment variable. mean (numpy.ndarray): A mean value. If :obj:`None`, the default values are used. If a supported pretrained model is used, the mean value used to train the pretrained model is used. Otherwise, the mean value calculated from ILSVRC 2012 dataset is used. initialW (callable): Initializer for the weights. initial_bias (callable): Initializer for the biases. """ _models = { 'imagenet': { 'param': {'n_class': 1000, 'mean': _imagenet_mean}, 'overwritable': ('mean',), 'url': 'https://chainercv-models.preferred.jp/' 'vgg16_imagenet_converted_2017_07_18.npz' } } def __init__(self, n_class=None, pretrained_model=None, mean=None, initialW=None, initial_bias=None): param, path = utils.prepare_pretrained_model( {'n_class': n_class, 'mean': mean}, pretrained_model, self._models, {'n_class': 1000, 'mean': _imagenet_mean}) self.mean = param['mean'] if initialW is None: # Employ default initializers used in the original paper. 
initialW = normal.Normal(0.01) if pretrained_model: # As a sampling process is time-consuming, # we employ a zero initializer for faster computation. initialW = constant.Zero() kwargs = {'initialW': initialW, 'initial_bias': initial_bias} super(VGG16, self).__init__() with self.init_scope(): self.conv1_1 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs) self.conv1_2 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs) self.pool1 = _max_pooling_2d self.conv2_1 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs) self.conv2_2 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs) self.pool2 = _max_pooling_2d self.conv3_1 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs) self.conv3_2 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs) self.conv3_3 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs) self.pool3 = _max_pooling_2d self.conv4_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs) self.conv4_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs) self.conv4_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs) self.pool4 = _max_pooling_2d self.conv5_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs) self.conv5_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs) self.conv5_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs) self.pool5 = _max_pooling_2d self.fc6 = Linear(None, 4096, **kwargs) self.fc6_relu = relu self.fc6_dropout = dropout self.fc7 = Linear(None, 4096, **kwargs) self.fc7_relu = relu self.fc7_dropout = dropout self.fc8 = Linear(None, param['n_class'], **kwargs) self.prob = softmax if path: chainer.serializers.load_npz(path, self) def _max_pooling_2d(x): return max_pooling_2d(x, ksize=2)
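# A small usage sketch of the pick mechanism defined above; the random image is a
# stand-in (any float32 RGB array of shape (3, H, W) works), and no pretrained
# weights are needed just to trace the output shapes.
import numpy as np
import chainer

model = VGG16(n_class=1000)
model.pick = ['conv5_3', 'fc7']
img = np.random.uniform(0, 255, size=(3, 224, 224)).astype(np.float32) - model.mean
with chainer.using_config('train', False), chainer.no_backprop_mode():
    conv5_3, fc7 = model(img[np.newaxis])  # batch of one image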
py
1a49d13ec80e92562ca84a5b7e299dabdd5db8ce
from shapely.geometry import Polygon from rtree import index import copy import uuid from collections import Counter class Box: def __init__(self): self.box= {} self.box['boundingBox'] = {'vertices':[{'x':0,'y':0} ,{'x':0,'y':0},{'x':0,'y':0},{'x':0,'y':0}]} self.box['identifier'] = str(uuid.uuid4()) self.box['text'] = '' self.box['class'] ='TEXT' self.box['font'] = {'family':'Arial Unicode MS', 'size':0, 'style':'REGULAR'} def get_box(self): return self.box class MapKeys: def __init__(self,box): self.box = box self.left = None self.right = None self.top = None self.height = None self.width = None self.bottom = None def get_left(self): if self.left != None: return self.left else : self.left = int(self.box['boundingBox']['vertices'][0]['x']) return self.left def get_right(self): if self.right != None: return self.right else : self.right = int(self.box['boundingBox']['vertices'][1]['x']) return self.right def get_top(self): if self.top != None: return self.top else : self.top = int(self.box['boundingBox']['vertices'][0]['y']) return self.top def get_bottom(self): if self.bottom != None: return self.bottom else : self.bottom = int(self.box['boundingBox']['vertices'][3]['y']) return self.bottom def get_height(self): if self.height != None: return self.height else : self.height = int(abs(self.get_top() - self.get_bottom())) return self.height def get_width(self): if self.width != None: return self.width else : self.width = int(abs(self.get_left() - self.get_right())) return self.width def index_tree(poly_index, poly, idx): idx.insert(poly_index, poly.bounds) def get_polygon(region): points = [] vertices = region['vertices'] for point in vertices: points.append((point['x'], point['y'])) if not (max(points)==(0,0) and min(points)==(0,0)): poly = Polygon(points) if not poly.is_valid: poly = poly.buffer(0.01) return poly else: return False def sort_regions(region_lines, sorted_lines=[]): check_y =region_lines[0]['boundingBox']['vertices'][0]['y'] spacing_threshold = abs(check_y - region_lines[0]['boundingBox']['vertices'][3]['y'])* 0.6 #0.8 # *2 #*0.5 same_line = list(filter(lambda x: (abs(x['boundingBox']['vertices'][0]['y'] - check_y) <= spacing_threshold), region_lines)) next_line = list(filter(lambda x: (abs(x['boundingBox']['vertices'][0]['y'] - check_y) > spacing_threshold), region_lines)) if len(same_line) >1 : same_line.sort(key=lambda x: x['boundingBox']['vertices'][0]['x'],reverse=False) sorted_lines += same_line if len(next_line) > 0: sort_regions(next_line, sorted_lines) return sorted_lines def add_font(regions): for idx,region in enumerate(regions): if not 'font' in region.keys(): height = abs(region['boundingBox']['vertices'][0]['y'] - region['boundingBox']['vertices'][2]['y']) regions[idx]['font']={'family':'Arial Unicode MS', 'size':height, 'style':'REGULAR'} return regions def collate_regions(regions, lines, child_class=None, grand_children=False,region_flag = True,skip_enpty_children=False,add_font=False ): child_key='regions' idx = index.Index() lines_intersected = [] if regions !=None and len(regions) > 0: lines_intersected =[] for line_idx, line in enumerate(lines): if child_class == 'LINE': if 'text' in line.keys(): del lines[line_idx]['text'] if add_font and 'font' not in line.keys(): height = abs(line['boundingBox']['vertices'][0]['y'] - line['boundingBox']['vertices'][2]['y']) lines[line_idx]['font']={'family':'Arial Unicode MS', 'size':height, 'style':'REGULAR'} if child_class is not None: lines[line_idx]['class'] = child_class poly = get_polygon(line['boundingBox']) if 
poly: idx.insert(line_idx, poly.bounds) for region_index, region in enumerate(regions): region_poly = get_polygon(region['boundingBox']) children_lines =[] if region_poly: children_lines = list(idx.intersection(region_poly.bounds)) if len(children_lines) > 0: region_lines = [] for intr_index in children_lines: if intr_index not in lines_intersected: if grand_children : if child_key not in lines[intr_index].keys(): grand_child = copy.deepcopy(lines[intr_index]) grand_child['class'] = 'WORD' lines[intr_index][child_key] = [grand_child] line_poly = get_polygon(lines[intr_index]['boundingBox']) if line_poly: area = region_poly.intersection(line_poly).area reg_area = region_poly.area line_area = line_poly.area if reg_area>0 and line_area>0 and area/min(line_area,reg_area) >0.5 : region_lines.append(lines[intr_index]) lines_intersected.append(intr_index) region_lines.sort(key=lambda x:x['boundingBox']['vertices'][0]['y']) if len(region_lines) > 0: regions[region_index][child_key] = sort_regions(region_lines,[]) regions[region_index]['avg_size'] = get_avrage_size(region_lines) else: regions[region_index][child_key] = [] else: regions[region_index][child_key] = [] if region_flag: for line_index, line in enumerate(lines): if line_index not in lines_intersected: line[child_key] = [ copy.deepcopy(line)] if child_class is not None: if child_class is 'LINE': line['class'] = 'PARA' if child_class is 'WORD': line['class'] ='LINE' regions.append(line) return regions def collate_cell_regions(regions, lines, child_class=None, grand_children=False,region_flag = True,skip_enpty_children=False,add_font=False ): child_key='regions' idx = index.Index() lines_intersected = [] if regions !=None and len(regions) > 0: lines_intersected =[] for line_idx, line in enumerate(lines): if child_class == 'LINE': if 'text' in line.keys(): del lines[line_idx]['text'] if add_font: height = abs(line['boundingBox']['vertices'][0]['y'] - line['boundingBox']['vertices'][2]['y']) lines[line_idx]['font']={'family':'Arial Unicode MS', 'size':height, 'style':'REGULAR'} if child_class is not None: lines[line_idx]['class'] = child_class poly = get_polygon(line['boundingBox']) if poly: idx.insert(line_idx, poly.bounds) for region_index, region in enumerate(regions): children_lines =[] region_poly = get_polygon(region['boundingBox']) if region_poly: children_lines = list(idx.intersection(region_poly.bounds)) if len(children_lines) > 0: region_lines = [] for intr_index in children_lines: if intr_index not in lines_intersected: if grand_children : if child_key not in lines[intr_index].keys(): grand_child = copy.deepcopy(lines[intr_index]) grand_child['class'] = 'WORD' lines[intr_index][child_key] = [grand_child] line_poly = get_polygon(lines[intr_index]['boundingBox']) if line_poly: area = region_poly.intersection(line_poly).area reg_area = region_poly.area line_area = line_poly.area if reg_area>0 and line_area>0 and area/min(line_area,reg_area) >0.5 : region_lines.append(lines[intr_index]) lines_intersected.append(intr_index) if child_key in region.keys() and type(region[child_key]) is list: pass else: region[child_key] = [] region_lines.sort(key=lambda x:x['boundingBox']['vertices'][0]['y']) if len(region_lines) > 1: regions[region_index][child_key].extend(sort_regions(region_lines,[])) regions[region_index]['avg_size'] = get_avrage_size(region_lines) else : regions[region_index][child_key].extend(region_lines) regions[region_index]['avg_size'] = get_avrage_size(region_lines) return regions def collate_text(file,craft_words, 
google_words): idx = index.Index() words_intersected = [] if craft_words !=None and len(craft_words) > 0: words_intersected =[] for word_idx, g_word in enumerate(google_words): poly = get_polygon(g_word['boundingBox']) if poly: idx.insert(word_idx, poly.bounds) for region_index, region in enumerate(craft_words): region_poly = get_polygon(region['boundingBox']) if region_poly: child_words = list(idx.intersection(region_poly.bounds)) text= ''; avg_conf = 0; conf_counter = 0; lang = [] if len(child_words) > 0: region_words = [] for intr_index in child_words: if intr_index not in words_intersected: line_poly = get_polygon(google_words[intr_index]['boundingBox']) if line_poly: area = region_poly.intersection(line_poly).area reg_area = region_poly.area line_area = line_poly.area if reg_area>0 and line_area>0 and area/min(line_area,reg_area) >0.3 : region_words.append(google_words[intr_index]) words_intersected.append(intr_index) region_words.sort(key=lambda x:x['boundingBox']['vertices'][0]['x']) for region_word in region_words: try: text = text + str(region_word['text']) if 'conf' in region_word.keys() and region_word['conf'] is not None: avg_conf += region_word['conf'] conf_counter += 1 if 'language' in region_word.keys() and region_word['language'] is not None: lang.append(region_word['language']) except Exception as e: print('error in collating text' + str(e)) if "craft_word" in file['config']["OCR"].keys() and file['config']["OCR"]["craft_word"]=="False" and len(region_words)>0: craft_words[region_index]['boundingBox'] = merge_corrds(region_words) if "craft_word" not in file['config']["OCR"].keys() and len(region_words)>0: craft_words[region_index]['boundingBox'] = merge_corrds(region_words) craft_words[region_index]['text'] = text if conf_counter> 0: craft_words[region_index]['conf'] = avg_conf/conf_counter else : craft_words[region_index]['conf'] = avg_conf craft_words[region_index]['language'] = frequent_element(lang) for g_word_index, g_word in enumerate(google_words): if g_word_index not in words_intersected: craft_words.append(g_word) return craft_words def remvoe_regions(regions, lines): idx = index.Index() lines_intersected = [] not_intersecting = [] if regions !=None and len(regions) > 0: lines_intersected =[] for line_idx, line in enumerate(lines): poly = get_polygon(line['boundingBox']) if poly: idx.insert(line_idx, poly.bounds) for region_index, region in enumerate(regions): region_poly = get_polygon(region['boundingBox']) if region_poly: children_lines = list(idx.intersection(region_poly.bounds)) if len(children_lines) > 0: region_lines = [] for intr_index in children_lines: region_lines.append(lines[intr_index]) lines_intersected.append(intr_index) for line_index, line in enumerate(lines): if line_index not in lines_intersected: not_intersecting.append(line) return not_intersecting def filterd_regions(regions): f_regions = [] if regions != None : for region in regions : height = abs(region['boundingBox']['vertices'][0]['y'] - region['boundingBox']['vertices'][2]['y']) if height > 0 : f_regions.append(region) return f_regions def frequent_element(l_ist): if len(l_ist) > 0 : occurence_count = Counter(l_ist) return occurence_count.most_common(1)[0][0] else : return None def get_ngram(indices, window_size = 2): ngrams = [] count = 0 for token in indices[:len(indices)-window_size+1]: ngrams.append(indices[count:count+window_size]) count = count+1 return ngrams def are_hlines(region1,region2,avg_ver_ratio): space = abs( region1['boundingBox']['vertices'][0]['y'] - 
region2['boundingBox']['vertices'][0]['y']) sepration = region2['boundingBox']['vertices'][0]['x'] - region1['boundingBox']['vertices'][1]['x'] h1 = abs(region1['boundingBox']['vertices'][3]['y'] - region1['boundingBox']['vertices'][0]['y']) h2 = abs(region2['boundingBox']['vertices'][3]['y'] - region2['boundingBox']['vertices'][0]['y']) max_height = max( h1 , h2 ) #*0.5 if avg_ver_ratio < 1.8 : diff_threshold = max_height * 0.8 if avg_ver_ratio >= 1.8 : diff_threshold = max_height * 0.9 #return ((space <= diff_threshold ) or(sepration <= 3 *avg_height)) and (sepration < 6 * avg_height) and (space <= diff_threshold *2.5 ) return sepration < 5 * max_height and space <= diff_threshold def merge_text(v_blocks): for block_index, v_block in enumerate(v_blocks): try: v_blocks[block_index]['font'] ={'family':'Arial Unicode MS', 'size':0, 'style':'REGULAR'} v_blocks['font']['size'] = max(v_block['regions'], key=lambda x: x['font']['size'])['font']['size'] if len(v_block['regions']) > 0 : v_blocks[block_index]['text'] = v_block['regions'][0]['text'] if len(v_block['regions']) > 1: for child in range(1, len(v_block['regions'])): v_blocks[block_index]['text'] += ' ' + str(v_block['regions'][child]['text']) except Exception as e: print('Error in merging text {}'.format(e)) return v_blocks def get_avrage_size(regions): size = 0 if regions != None: len_regions = len(regions) count=0 if len_regions> 0 : for region in regions : if 'font' in region.keys(): size += region['font']['size'] count=count+1 if count==0: count=1 return int(size/ count) else: return size else: return size def set_font_info(page_words,font_info): idx = index.Index() if font_info != None and len(font_info) > 0: for word_idx, word in enumerate(page_words): height = abs(word['boundingBox']['vertices'][0]['y'] - word['boundingBox']['vertices'][2]['y']) page_words[word_idx]['font'] = {'family': 'Arial Unicode MS', 'size': height, 'style': 'REGULAR'} poly = get_polygon(word['boundingBox']) if poly: idx.insert(word_idx, poly.bounds) for region_index, region in enumerate(font_info): region_poly = get_polygon(region['boundingBox']) if region_poly: children_lines = list(idx.intersection(region_poly.bounds)) if len(children_lines) > 0: for intr_index in children_lines: #if intr_index not in words_intersected: line_poly = get_polygon(page_words[intr_index]['boundingBox']) if line_poly: area = region_poly.intersection(line_poly).area reg_area = region_poly.area line_area = line_poly.area if reg_area > 0 and line_area > 0 : if (region['class'] == 'BOLD') and (area / min(line_area, reg_area) > 0.2): page_words[intr_index]['font']['style']= update_style(page_words[intr_index]['font']['style'], 'BOLD') if (region['class'] == 'SUPERSCRIPT') and (region_poly.union(line_poly).area != 0): iou = area / region_poly.union(line_poly).area if iou > 0.33: page_words[intr_index]['font']['style']= update_style(page_words[intr_index]['font']['style'], 'SUPERSCRIPT') return page_words def update_style(prior_cls, cls): if prior_cls == 'REGULAR': return cls else : if prior_cls == cls: return cls else : return '{},{}'.format(prior_cls,cls) def merge_children(siblings,children_none=False): box = Box().get_box() if not children_none: box['regions'] = copy.deepcopy(siblings) box['boundingBox']['vertices'][0]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['x'])['boundingBox']['vertices'][0]['x'] box['boundingBox']['vertices'][0]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['y'])['boundingBox']['vertices'][0]['y'] 
box['boundingBox']['vertices'][1]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][1]['x'])['boundingBox']['vertices'][1]['x'] box['boundingBox']['vertices'][1]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][1]['y'])['boundingBox']['vertices'][1]['y'] box['boundingBox']['vertices'][2]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['x'])['boundingBox']['vertices'][2]['x'] box['boundingBox']['vertices'][2]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['y'])['boundingBox']['vertices'][2]['y'] box['boundingBox']['vertices'][3]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][3]['x'])['boundingBox']['vertices'][3]['x'] box['boundingBox']['vertices'][3]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][3]['y'])['boundingBox']['vertices'][3]['y'] return box def merge_corrds(siblings,children_none=False): box = Box().get_box() box['boundingBox']['vertices'][0]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['x'])['boundingBox']['vertices'][0]['x'] box['boundingBox']['vertices'][0]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['y'])['boundingBox']['vertices'][0]['y'] box['boundingBox']['vertices'][1]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][1]['x'])['boundingBox']['vertices'][1]['x'] box['boundingBox']['vertices'][1]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][1]['y'])['boundingBox']['vertices'][1]['y'] box['boundingBox']['vertices'][2]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['x'])['boundingBox']['vertices'][2]['x'] box['boundingBox']['vertices'][2]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['y'])['boundingBox']['vertices'][2]['y'] box['boundingBox']['vertices'][3]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][3]['x'])['boundingBox']['vertices'][3]['x'] box['boundingBox']['vertices'][3]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][3]['y'])['boundingBox']['vertices'][3]['y'] return box['boundingBox']
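# A minimal sketch of the rtree + shapely lookup pattern used by collate_regions
# and collate_text above; the two word boxes and the region box are made-up examples.
from rtree import index

words = [
    {'boundingBox': {'vertices': [{'x': 10, 'y': 10}, {'x': 60, 'y': 10},
                                  {'x': 60, 'y': 30}, {'x': 10, 'y': 30}]}},
    {'boundingBox': {'vertices': [{'x': 70, 'y': 10}, {'x': 120, 'y': 10},
                                  {'x': 120, 'y': 30}, {'x': 70, 'y': 30}]}},
]
region = {'boundingBox': {'vertices': [{'x': 0, 'y': 0}, {'x': 130, 'y': 0},
                                       {'x': 130, 'y': 40}, {'x': 0, 'y': 40}]}}

idx = index.Index()
for i, word in enumerate(words):
    poly = get_polygon(word['boundingBox'])  # helper defined in this module
    if poly:
        idx.insert(i, poly.bounds)

region_poly = get_polygon(region['boundingBox'])
candidates = list(idx.intersection(region_poly.bounds))  # -> [0, 1]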
bzl
1a49d26954b48af816be2e918a18cd0e27cca2e4
load(":known_shas.bzl", "FILE_KEY_TO_SHA") load("//rust/platform:triple_mappings.bzl", "system_to_binary_ext", "system_to_dylib_ext", "system_to_staticlib_ext", "triple_to_constraint_set", "triple_to_system") load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") DEFAULT_TOOLCHAIN_NAME_PREFIX = "toolchain_for" def rust_repositories(): """Emits a default set of toolchains for Linux, OSX, and Freebsd Skip this macro and call the `rust_repository_set` macros directly if you need a compiler for other hosts or for additional target triples. """ RUST_VERSION = "1.35.0" maybe( http_archive, name = "rules_cc", url = "https://github.com/bazelbuild/rules_cc/archive/624b5d59dfb45672d4239422fa1e3de1822ee110.zip", sha256 = "8c7e8bf24a2bf515713445199a677ee2336e1c487fa1da41037c6026de04bbc3", strip_prefix = "rules_cc-624b5d59dfb45672d4239422fa1e3de1822ee110", type = "zip", ) rust_repository_set( name = "rust_linux_x86_64", exec_triple = "x86_64-unknown-linux-gnu", extra_target_triples = ["wasm32-unknown-unknown"], version = RUST_VERSION, ) rust_repository_set( name = "rust_darwin_x86_64", exec_triple = "x86_64-apple-darwin", extra_target_triples = ["wasm32-unknown-unknown"], version = RUST_VERSION, ) rust_repository_set( name = "rust_freebsd_x86_64", exec_triple = "x86_64-unknown-freebsd", extra_target_triples = ["wasm32-unknown-unknown"], version = RUST_VERSION, ) def _check_version_valid(version, iso_date, param_prefix = ""): """Verifies that the provided rust version and iso_date make sense.""" if not version and iso_date: fail("{param_prefix}iso_date must be paired with a {param_prefix}version".format(param_prefix = param_prefix)) if version in ("beta", "nightly") and not iso_date: fail("{param_prefix}iso_date must be specified if version is 'beta' or 'nightly'".format(param_prefix = param_prefix)) if version not in ("beta", "nightly") and iso_date: print("{param_prefix}iso_date is ineffective if an exact version is specified".format(param_prefix = param_prefix)) def serialized_constraint_set_from_triple(target_triple): constraint_set = triple_to_constraint_set(target_triple) constraint_set_strs = [] for constraint in constraint_set: constraint_set_strs.append("\"{}\"".format(constraint)) return "[{}]".format(", ".join(constraint_set_strs)) def BUILD_for_compiler(target_triple): """Emits a BUILD file the compiler .tar.gz.""" system = triple_to_system(target_triple) return """ load("@io_bazel_rules_rust//rust:toolchain.bzl", "rust_toolchain") filegroup( name = "rustc", srcs = ["bin/rustc{binary_ext}"], visibility = ["//visibility:public"], ) filegroup( name = "rustc_lib", srcs = glob([ "lib/*{dylib_ext}", "lib/rustlib/{target_triple}/codegen-backends/*{dylib_ext}", ]), visibility = ["//visibility:public"], ) filegroup( name = "rustdoc", srcs = ["bin/rustdoc{binary_ext}"], visibility = ["//visibility:public"], ) """.format( binary_ext = system_to_binary_ext(system), staticlib_ext = system_to_staticlib_ext(system), dylib_ext = system_to_dylib_ext(system), target_triple = target_triple, ) def BUILD_for_stdlib(target_triple): """Emits a BUILD file the stdlib .tar.gz.""" system = triple_to_system(target_triple) return """ filegroup( name = "rust_lib-{target_triple}", srcs = glob( [ "lib/rustlib/{target_triple}/lib/*.rlib", "lib/rustlib/{target_triple}/lib/*{dylib_ext}", "lib/rustlib/{target_triple}/lib/*{staticlib_ext}", ], # Some patterns (e.g. 
`lib/*.a`) don't match anything, see https://github.com/bazelbuild/rules_rust/pull/245 allow_empty = True, ), visibility = ["//visibility:public"], ) """.format( binary_ext = system_to_binary_ext(system), staticlib_ext = system_to_staticlib_ext(system), dylib_ext = system_to_dylib_ext(system), target_triple = target_triple, ) def BUILD_for_rust_toolchain(workspace_name, name, exec_triple, target_triple, default_edition = "2015"): """Emits a toolchain declaration to match an existing compiler and stdlib. Args: workspace_name: The name of the workspace that this toolchain resides in name: The name of the toolchain declaration exec_triple: The rust-style target that this compiler runs on target_triple: The rust-style target triple of the tool """ system = triple_to_system(target_triple) return """ rust_toolchain( name = "{toolchain_name}_impl", rust_doc = "@{workspace_name}//:rustdoc", rust_lib = "@{workspace_name}//:rust_lib-{target_triple}", rustc = "@{workspace_name}//:rustc", rustc_lib = "@{workspace_name}//:rustc_lib", staticlib_ext = "{staticlib_ext}", dylib_ext = "{dylib_ext}", os = "{system}", default_edition = "{default_edition}", exec_triple = "{exec_triple}", target_triple = "{target_triple}", visibility = ["//visibility:public"], ) """.format( toolchain_name = name, workspace_name = workspace_name, staticlib_ext = system_to_staticlib_ext(system), dylib_ext = system_to_dylib_ext(system), system = system, default_edition = default_edition, exec_triple = exec_triple, target_triple = target_triple, ) def BUILD_for_toolchain(name, parent_workspace_name, exec_triple, target_triple): return """ toolchain( name = "{name}", exec_compatible_with = {exec_constraint_sets_serialized}, target_compatible_with = {target_constraint_sets_serialized}, toolchain = "@{parent_workspace_name}//:{name}_impl", toolchain_type = "@io_bazel_rules_rust//rust:toolchain", ) """.format( name = name, exec_constraint_sets_serialized = serialized_constraint_set_from_triple(exec_triple), target_constraint_sets_serialized = serialized_constraint_set_from_triple(target_triple), parent_workspace_name = parent_workspace_name, ) def produce_tool_suburl(tool_name, target_triple, version, iso_date = None): """Produces a fully qualified Rust tool name for URL Args: tool_name: The name of the tool per static.rust-lang.org target_triple: The rust-style target triple of the tool version: The version of the tool among "nightly", "beta', or an exact version. iso_date: The date of the tool (or None, if the version is a specific version). """ if iso_date: return "{}/{}-{}-{}".format(iso_date, tool_name, version, target_triple) else: return "{}-{}-{}".format(tool_name, version, target_triple) def produce_tool_path(tool_name, target_triple, version): """Produces a qualified Rust tool name Args: tool_name: The name of the tool per static.rust-lang.org target_triple: The rust-style target triple of the tool version: The version of the tool among "nightly", "beta', or an exact version. """ return "{}-{}-{}".format(tool_name, version, target_triple) def load_arbitrary_tool(ctx, tool_name, param_prefix, tool_subdirectory, version, iso_date, target_triple): """Loads a Rust tool, downloads, and extracts into the common workspace. This function sources the tool from the Rust-lang static file server. The index is available at: https://static.rust-lang.org/dist/index.html (or the path specified by "${RUST_STATIC_URL}/dist/index.html" if the RUST_STATIC_URL envinronment variable is set). Args: ctx: A repository_ctx (no attrs required). 
tool_name: The name of the given tool per the archive naming. param_prefix: The name of the versioning param if the repository rule supports multiple tools. tool_subdirectory: The subdirectory of the tool files (wo level below the root directory of the archive. The root directory of the archive is expected to match $TOOL_NAME-$VERSION-$TARGET_TRIPLE. version: The version of the tool among "nightly", "beta', or an exact version. iso_date: The date of the tool (or None, if the version is a specific version). target_triple: The rust-style target triple of the tool """ _check_version_valid(version, iso_date, param_prefix) # N.B. See https://static.rust-lang.org/dist/index.html to find the tool_suburl for a given # tool. tool_suburl = produce_tool_suburl(tool_name, target_triple, version, iso_date) static_rust = ctx.os.environ["STATIC_RUST_URL"] if "STATIC_RUST_URL" in ctx.os.environ else "https://static.rust-lang.org" url = "{}/dist/{}.tar.gz".format(static_rust, tool_suburl) tool_path = produce_tool_path(tool_name, target_triple, version) ctx.download_and_extract( url, output = "", sha256 = FILE_KEY_TO_SHA.get(tool_suburl) or "", stripPrefix = "{}/{}".format(tool_path, tool_subdirectory), ) def _load_rust_compiler(ctx): """Loads a rust compiler and yields corresponding BUILD for it Args: ctx: A repository_ctx. Returns: The BUILD file contents for this compiler and compiler library """ target_triple = ctx.attr.exec_triple load_arbitrary_tool( ctx, iso_date = ctx.attr.iso_date, param_prefix = "rustc_", target_triple = target_triple, tool_name = "rustc", tool_subdirectory = "rustc", version = ctx.attr.version, ) compiler_BUILD = BUILD_for_compiler(target_triple) return compiler_BUILD def _load_rust_stdlib(ctx, target_triple): """Loads a rust standard library and yields corresponding BUILD for it Args: ctx: A repository_ctx. 
target_triple: The rust-style target triple of the tool Returns: The BUILD file contents for this stdlib, and a toolchain decl to match """ load_arbitrary_tool( ctx, iso_date = ctx.attr.iso_date, param_prefix = "rust-std_", target_triple = target_triple, tool_name = "rust-std", tool_subdirectory = "rust-std-{}".format(target_triple), version = ctx.attr.version, ) toolchain_prefix = ctx.attr.toolchain_name_prefix or DEFAULT_TOOLCHAIN_NAME_PREFIX stdlib_BUILD = BUILD_for_stdlib(target_triple) toolchain_BUILD = BUILD_for_rust_toolchain( name = "{toolchain_prefix}_{target_triple}".format( toolchain_prefix = toolchain_prefix, target_triple = target_triple, ), exec_triple = ctx.attr.exec_triple, target_triple = target_triple, workspace_name = ctx.attr.name, ) return stdlib_BUILD + toolchain_BUILD def _rust_toolchain_repository_impl(ctx): """The implementation of the rust toolchain repository rule.""" _check_version_valid(ctx.attr.version, ctx.attr.iso_date) BUILD_components = [_load_rust_compiler(ctx)] for target_triple in [ctx.attr.exec_triple] + ctx.attr.extra_target_triples: BUILD_components.append(_load_rust_stdlib(ctx, target_triple)) ctx.file("WORKSPACE", "") ctx.file("BUILD", "\n".join(BUILD_components)) def _rust_toolchain_repository_proxy_impl(ctx): BUILD_components = [] for target_triple in [ctx.attr.exec_triple] + ctx.attr.extra_target_triples: BUILD_components.append(BUILD_for_toolchain( name = "{toolchain_prefix}_{target_triple}".format( toolchain_prefix = ctx.attr.toolchain_name_prefix, target_triple = target_triple, ), exec_triple = ctx.attr.exec_triple, parent_workspace_name = ctx.attr.parent_workspace_name, target_triple = target_triple, )) ctx.file("WORKSPACE", "") ctx.file("BUILD", "\n".join(BUILD_components)) """Composes a single workspace containing the toolchain components for compiling on a given platform to a series of target platforms. A given instance of this rule should be accompanied by a rust_toolchain_repository_proxy invocation to declare its toolchains to Bazel; the indirection allows separating toolchain selection from toolchain fetching Args: name: A unique name for this rule exec_triple: The Rust-style target triple for the compilation platform extra_target_triples: The Rust-style triples for extra compilation targets toolchain_name_prefix: The per-target prefix expected for the rust_toolchain declarations version: The version of the tool among "nightly", "beta', or an exact version. iso_date: The date of the tool (or None, if the version is a specific version). """ rust_toolchain_repository = repository_rule( attrs = { "version": attr.string(mandatory = True), "iso_date": attr.string(), "exec_triple": attr.string(mandatory = True), "extra_target_triples": attr.string_list(), "toolchain_name_prefix": attr.string(), }, implementation = _rust_toolchain_repository_impl, ) """Generates a toolchain-bearing repository that declares the toolchains from some other rust_toolchain_repository. Args: name: A unique name for this rule parent_workspace_name: The name of the other rust_toolchain_repository exec_triple: The Rust-style target triple for the compilation platform extra_target_triples: The Rust-style triples for extra compilation targets toolchain_name_prefix: The per-target prefix expected for the rust_toolchain declarations in the parent workspace. 
""" rust_toolchain_repository_proxy = repository_rule( attrs = { "parent_workspace_name": attr.string(mandatory = True), "exec_triple": attr.string(mandatory = True), "extra_target_triples": attr.string_list(), "toolchain_name_prefix": attr.string(), }, implementation = _rust_toolchain_repository_proxy_impl, ) def rust_repository_set(name, version, exec_triple, extra_target_triples, iso_date = None): """Assembles a remote repository for the given toolchain params, produces a proxy repository to contain the toolchain declaration, and registers the toolchains. N.B. A "proxy repository" is needed to allow for registering the toolchain (with constraints) without actually downloading the toolchain. Args: name: The name of the generated repository version: The version of the tool among "nightly", "beta', or an exact version. iso_date: The date of the tool (or None, if the version is a specific version). exec_triple: The Rust-style target that this compiler runs on extra_target_triples: Additional rust-style targets that this set of toolchains should support. """ rust_toolchain_repository( name = name, exec_triple = exec_triple, extra_target_triples = extra_target_triples, iso_date = iso_date, toolchain_name_prefix = DEFAULT_TOOLCHAIN_NAME_PREFIX, version = version, ) rust_toolchain_repository_proxy( name = name + "_toolchains", exec_triple = exec_triple, extra_target_triples = extra_target_triples, parent_workspace_name = name, toolchain_name_prefix = DEFAULT_TOOLCHAIN_NAME_PREFIX, ) all_toolchain_names = [] for target_triple in [exec_triple] + extra_target_triples: all_toolchain_names.append("@{name}_toolchains//:{toolchain_name_prefix}_{triple}".format( name = name, toolchain_name_prefix = DEFAULT_TOOLCHAIN_NAME_PREFIX, triple = target_triple, )) # Register toolchains native.register_toolchains(*all_toolchain_names) native.register_toolchains("@io_bazel_rules_rust//rust/private/dummy_cc_toolchain:dummy_cc_wasm32_toolchain")
py
1a49d5443cbd9f2105d67f4e3ac1f4773b530c23
""" lml.plugin ~~~~~~~~~~~~~~~~~~~ lml divides the plugins into two category: load-me-later plugins and load-me-now ones. load-me-later plugins refer to the plugins were loaded when needed due its bulky and/or memory hungry dependencies. Those plugins has to use lml and respect lml's design principle. load-me-now plugins refer to the plugins are immediately imported. All conventional Python classes are by default immediately imported. :class:`~lml.plugin.PluginManager` should be inherited to form new plugin manager class. If you have more than one plugins in your architecture, it is advisable to have one class per plugin type. :class:`~lml.plugin.PluginInfoChain` helps the plugin module to declare the available plugins in the module. :class:`~lml.plugin.PluginInfo` can be subclassed to describe your plugin. Its method :meth:`~lml.plugin.PluginInfo.tags` can be overridden to help its matching :class:`~lml.plugin.PluginManager` to look itself up. :copyright: (c) 2017-2020 by Onni Software Ltd. :license: New BSD License, see LICENSE for more details """ import logging from collections import defaultdict from lml.utils import json_dumps, do_import_class PLUG_IN_MANAGERS = {} CACHED_PLUGIN_INFO = defaultdict(list) log = logging.getLogger(__name__) class PluginInfo(object): """ Information about the plugin. It is used together with PluginInfoChain to describe the plugins. Meanwhile, it is a class decorator and can be used to register a plugin immediately for use, in other words, the PluginInfo decorated plugin class is not loaded later. Parameters ------------- name: plugin name absolute_import_path: absolute import path from your plugin name space for your plugin class tags: a list of keywords help the plugin manager to retrieve your plugin keywords: Another custom properties. Examples ------------- For load-me-later plugins: >>> info = PluginInfo("sample", ... abs_class_path='lml.plugin.PluginInfo', # demonstration only. ... tags=['load-me-later'], ... custom_property = 'I am a custom property') >>> print(info.module_name) lml >>> print(info.custom_property) I am a custom property For load-me-now plugins: >>> @PluginInfo("sample", tags=['load-me-now']) ... class TestPlugin: ... def echo(self, words): ... print("echoing %s" % words) Now let's retrive the second plugin back: >>> class SamplePluginManager(PluginManager): ... def __init__(self): ... PluginManager.__init__(self, "sample") >>> sample_manager = SamplePluginManager() >>> test_plugin=sample_manager.get_a_plugin("load-me-now") >>> test_plugin.echo("hey..") echoing hey.. 
""" def __init__( self, plugin_type, abs_class_path=None, tags=None, **keywords ): self.plugin_type = plugin_type self.absolute_import_path = abs_class_path self.cls = None self.properties = keywords self.__tags = tags def __getattr__(self, name): if name == "module_name": if self.absolute_import_path: module_name = self.absolute_import_path.split(".")[0] else: module_name = self.cls.__module__ return module_name return self.properties.get(name) def tags(self): """ A list of tags for identifying the plugin class The plugin class is described at the absolute_import_path """ if self.__tags is None: yield self.plugin_type else: for tag in self.__tags: yield tag def __repr__(self): rep = { "plugin_type": self.plugin_type, "path": self.absolute_import_path, } rep.update(self.properties) return json_dumps(rep) def __call__(self, cls): self.cls = cls _register_a_plugin(self, cls) return cls class PluginInfoChain(object): """ Pandas style, chained list declaration It is used in the plugin packages to list all plugin classes """ def __init__(self, path): self._logger = logging.getLogger( self.__class__.__module__ + "." + self.__class__.__name__ ) self.module_name = path def add_a_plugin(self, plugin_type, submodule=None, **keywords): """ Add a plain plugin Parameters ------------- plugin_type: plugin manager name submodule: the relative import path to your plugin class """ a_plugin_info = PluginInfo( plugin_type, self._get_abs_path(submodule), **keywords ) self.add_a_plugin_instance(a_plugin_info) return self def add_a_plugin_instance(self, plugin_info_instance): """ Add a plain plugin Parameters ------------- plugin_info_instance: an instance of PluginInfo The developer has to specify the absolute import path """ self._logger.debug( "add %s as '%s' plugin", plugin_info_instance.absolute_import_path, plugin_info_instance.plugin_type, ) _load_me_later(plugin_info_instance) return self def _get_abs_path(self, submodule): return "%s.%s" % (self.module_name, submodule) class PluginManager(object): """ Load plugin info into in-memory dictionary for later import Parameters -------------- plugin_type: the plugin type. All plugins of this plugin type will be registered to it. """ def __init__(self, plugin_type): self.plugin_name = plugin_type self.registry = defaultdict(list) self.tag_groups = dict() self._logger = logging.getLogger( self.__class__.__module__ + "." 
+ self.__class__.__name__ ) _register_class(self) def get_a_plugin(self, key, **keywords): """ Get a plugin Parameters --------------- key: the key to find the plugins keywords: additional parameters for help the retrieval of the plugins """ self._logger.debug("get a plugin called") plugin = self.load_me_now(key) return plugin() def raise_exception(self, key): """Raise plugin not found exception Override this method to raise custom exception Parameters ----------------- key: the key to find the plugin """ self._logger.debug(self.registry.keys()) raise Exception("No %s is found for %s" % (self.plugin_name, key)) def load_me_later(self, plugin_info): """ Register a plugin info for later loading Parameters -------------- plugin_info: a instance of plugin info """ self._logger.debug("load %s later", plugin_info.absolute_import_path) self._update_registry_and_expand_tag_groups(plugin_info) def load_me_now(self, key, library=None, **keywords): """ Import a plugin from plugin registry Parameters ----------------- key: the key to find the plugin library: to use a specific plugin module """ if keywords: self._logger.debug(keywords) __key = key.lower() if __key in self.registry: for plugin_info in self.registry[__key]: cls = self.dynamic_load_library(plugin_info) module_name = _get_me_pypi_package_name(cls) if library and module_name != library: continue else: break else: # only library condition could raise an exception self._logger.debug("%s is not installed" % library) self.raise_exception(key) self._logger.debug("load %s now for '%s'", cls, key) return cls else: self.raise_exception(key) def dynamic_load_library(self, a_plugin_info): """Dynamically load the plugin info if not loaded Parameters -------------- a_plugin_info: a instance of plugin info """ if a_plugin_info.cls is None: self._logger.debug("import " + a_plugin_info.absolute_import_path) cls = do_import_class(a_plugin_info.absolute_import_path) a_plugin_info.cls = cls return a_plugin_info.cls def register_a_plugin(self, plugin_cls, plugin_info): """ for dynamically loaded plugin during runtime Parameters -------------- plugin_cls: the actual plugin class refered to by the second parameter plugin_info: a instance of plugin info """ self._logger.debug("register %s", _show_me_your_name(plugin_cls)) plugin_info.cls = plugin_cls self._update_registry_and_expand_tag_groups(plugin_info) def get_primary_key(self, key): __key = key.lower() return self.tag_groups.get(__key, None) def _update_registry_and_expand_tag_groups(self, plugin_info): primary_tag = None for index, key in enumerate(plugin_info.tags()): self.registry[key.lower()].append(plugin_info) if index == 0: primary_tag = key.lower() self.tag_groups[key.lower()] = primary_tag def _register_class(cls): """Reigister a newly created plugin manager""" log.debug("declare '%s' plugin manager", cls.plugin_name) PLUG_IN_MANAGERS[cls.plugin_name] = cls if cls.plugin_name in CACHED_PLUGIN_INFO: # check if there is early registrations or not for plugin_info in CACHED_PLUGIN_INFO[cls.plugin_name]: if plugin_info.absolute_import_path: log.debug( "load cached plugin info: %s", plugin_info.absolute_import_path, ) else: log.debug( "load cached plugin info: %s", _show_me_your_name(plugin_info.cls), ) cls.load_me_later(plugin_info) del CACHED_PLUGIN_INFO[cls.plugin_name] def _register_a_plugin(plugin_info, plugin_cls): """module level function to register a plugin""" manager = PLUG_IN_MANAGERS.get(plugin_info.plugin_type) if manager: manager.register_a_plugin(plugin_cls, plugin_info) else: # let's 
cache it and wait the manager to be registered try: log.debug("caching %s", _show_me_your_name(plugin_cls.__name__)) except AttributeError: log.debug("caching %s", _show_me_your_name(plugin_cls)) CACHED_PLUGIN_INFO[plugin_info.plugin_type].append(plugin_info) def _load_me_later(plugin_info): """ module level function to load a plugin later""" manager = PLUG_IN_MANAGERS.get(plugin_info.plugin_type) if manager: manager.load_me_later(plugin_info) else: # let's cache it and wait the manager to be registered log.debug( "caching %s for %s", plugin_info.absolute_import_path, plugin_info.plugin_type, ) CACHED_PLUGIN_INFO[plugin_info.plugin_type].append(plugin_info) def _get_me_pypi_package_name(module): try: module_name = module.__module__ root_module_name = module_name.split(".")[0] return root_module_name.replace("_", "-") except AttributeError: return None def _show_me_your_name(cls_func_or_data_type): try: return cls_func_or_data_type.__name__ except AttributeError: return str(type(cls_func_or_data_type))
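A minimal end-to-end sketch of the load-me-now path that the docstrings above describe, assuming the lml package is installed; the "sample" plugin type and the echo method follow the module's own doctest:

from lml.plugin import PluginInfo, PluginManager


@PluginInfo("sample", tags=["load-me-now"])
class EchoPlugin:
    def echo(self, words):
        return "echoing %s" % words


class SamplePluginManager(PluginManager):
    def __init__(self):
        super().__init__("sample")


# The decorator registered EchoPlugin (or cached it until a manager exists);
# get_a_plugin looks the class up by tag and returns an instance.
manager = SamplePluginManager()
plugin = manager.get_a_plugin("load-me-now")
print(plugin.echo("hey.."))  # echoing hey..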
py
1a49d60b02a01982a4819e291754fc7134bbfd2a
from django.contrib import admin

from .models import Author, Book

# Register your models here.
admin.site.register(Author)
admin.site.register(Book)
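An alternative version of the same admin.py registers Book through a ModelAdmin subclass to control the change list; this sketch is hypothetical, since the field names ("title", "author") are assumptions not shown in the models above:

from django.contrib import admin

from .models import Author, Book


@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    # Field names here are illustrative assumptions only.
    list_display = ("title", "author")
    search_fields = ("title",)


admin.site.register(Author)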
py
1a49d6430a87f69f8585f532cda417ea777d177f
import uqbar.strings

import supriya.assets.synthdefs
import supriya.realtime


def test_01(server):
    group_a = supriya.realtime.Group()
    group_a.allocate(target_node=server)
    group_b = supriya.realtime.Group()
    group_b.allocate(target_node=server)
    synthdef = supriya.assets.synthdefs.test
    assert synthdef not in server
    synth_a = supriya.realtime.Synth(synthdef)
    assert synthdef not in server
    assert not synth_a.is_allocated
    group_a.append(synth_a)
    assert synthdef in server
    assert synth_a.is_allocated
    assert synth_a.parent is group_a
    assert synth_a in group_a
    assert synth_a not in group_b
    server_state = str(server.query_remote_nodes())
    assert server_state == uqbar.strings.normalize(
        """
        NODE TREE 0 group
            1 group
                1001 group
                1000 group
                    1002 test
        """
    )
    group_b.append(synth_a)
    assert synthdef in server
    assert synth_a.is_allocated
    assert synth_a.parent is group_b
    assert synth_a in group_b
    assert synth_a not in group_a
    server_state = str(server.query_remote_nodes())
    assert server_state == uqbar.strings.normalize(
        """
        NODE TREE 0 group
            1 group
                1001 group
                    1002 test
                1000 group
        """
    )
    synth_b = supriya.realtime.Synth(synthdef)
    assert not synth_b.is_allocated
    assert synth_b.parent is None
    group_b.append(synth_b)
    assert synth_b.is_allocated
    assert synth_b.parent is group_b
    server_state = str(server.query_remote_nodes())
    assert server_state == uqbar.strings.normalize(
        """
        NODE TREE 0 group
            1 group
                1001 group
                    1002 test
                    1003 test
                1000 group
        """
    )
py
1a49d6db2926ca98b44f2022024776f86a61a88b
""" Imports the various compute backends """ from typing import Set from ..exceptions import InputError, ResourceError from .cfour import CFOURHarness from .dftd3 import DFTD3Harness from .entos import EntosHarness from .gamess import GAMESSHarness from .molpro import MolproHarness from .mopac import MopacHarness from .mp2d import MP2DHarness from .nwchem import NWChemHarness from .openmm import OpenMMHarness from .psi4 import Psi4Harness from .qchem import QChemHarness from .rdkit import RDKitHarness from .terachem import TeraChemHarness from .torchani import TorchANIHarness from .turbomole import TurbomoleHarness __all__ = ["register_program", "get_program", "list_all_programs", "list_available_programs"] programs = {} def register_program(entry_point: "ProgramHarness") -> None: """ Register a new ProgramHarness with QCEngine. """ name = entry_point.name if name.lower() in programs.keys(): raise ValueError("{} is already a registered program.".format(name)) programs[name.lower()] = entry_point def unregister_program(name: str) -> None: """ Unregisters a given program. """ ret = programs.pop(name.lower(), None) if ret is None: raise KeyError(f"Program {name} is not registered with QCEngine") def get_program(name: str, check: bool = True) -> "ProgramHarness": """ Returns a program's executor class Parameters ---------- check ``True`` Do raise error if program not found. ``False`` is handy for the specialized case of calling non-execution methods (like parsing for testing) on the returned ``Harness``. """ name = name.lower() if name not in programs: raise InputError(f"Program {name} is not registered to QCEngine.") ret = programs[name] if check: try: ret.found(raise_error=True) except ModuleNotFoundError as err: raise ResourceError(f"Program {name} is registered with QCEngine, but cannot be found.") from err return ret def list_all_programs() -> Set[str]: """ List all programs registered by QCEngine. """ return set(programs.keys()) def list_available_programs() -> Set[str]: """ List all programs that can be exectued (found) by QCEngine. """ ret = set() for k, p in programs.items(): if p.found(): ret.add(k) return ret # Quantum register_program(CFOURHarness()) register_program(EntosHarness()) register_program(GAMESSHarness()) register_program(MolproHarness()) register_program(NWChemHarness()) register_program(Psi4Harness()) register_program(QChemHarness()) register_program(TeraChemHarness()) register_program(TurbomoleHarness()) # Semi-empirical register_program(MopacHarness()) # AI register_program(TorchANIHarness()) # Molecular Mechanics register_program(RDKitHarness()) register_program(OpenMMHarness()) # Analytical Corrections register_program(DFTD3Harness()) register_program(MP2DHarness())
py
1a49d71c23f01981fbe52e34a75d69570766d75b
# Copyright 2014 Diamond Light Source Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: image_data :platform: Unix :synopsis: A module for loading any of the FabIO python module supported \ image formats (e.g. tiffs) .. moduleauthor:: Nicola Wadeson <[email protected]> """ import os import fabio import numpy as np from savu.data.data_structures.data_types.base_type import BaseType class ImageData(BaseType): """ This class loads any of the FabIO python module supported image formats. """ def __init__(self, folder, Data, dim, shape=None, data_prefix=None): self.folder = folder self._data_obj = Data self.frame_dim = dim self.shape = shape self.prefix = data_prefix super(ImageData, self).__init__() self.nFrames = None self.file_names = self.__get_file_names(folder, data_prefix) self.start_file = fabio.open(self.file_names[0]) self.dtype = self.start_file.data[0, 0].dtype self.image_shape = (self.start_file.dim2, self.start_file.dim1) if shape is None: self.shape = (self.nFrames,) else: self.shape = shape self.full_shape = self.image_shape + self.shape self.image_dims = set(np.arange(len(self.full_shape)))\ .difference(set(self.frame_dim)) def clone_data_args(self, args, kwargs, extras): args = ['folder', 'self', 'frame_dim'] kwargs['shape'] = 'shape' kwargs['prefix'] = 'prefix' return args, kwargs, extras def __getitem__(self, index): index = [index[i] if index[i].start is not None else slice(0, self.shape[i]) for i in range(len(index))] size = [len(np.arange(i.start, i.stop, i.step)) for i in index] data = np.empty(size, dtype=self.dtype) tiff_slices = [index[i] for i in self.image_dims] # shift tiff dims to start from 0 index = list(index) for i in self.image_dims: end = \ len(np.arange(0, index[i].stop-index[i].start, index[i].step)) index[i] = slice(0, end, 1) index, frameidx = self.__get_indices(index, size) for i in range(len(frameidx)): image = fabio.open(self.file_names[frameidx[i]]).data[tuple(tiff_slices)] for d in self.frame_dim: image = np.expand_dims(image, axis=d) data[tuple(index[i])] = image return data def __get_file_names(self, folder, prefix): import re import glob # files = os.listdir(folder) fullpath = str.strip(folder) if prefix is not None: fullpath = os.path.join(folder, prefix + '*') else: fullpath = os.path.join(fullpath, '*') files = glob.glob(fullpath) self.nFrames = len(files) file_nos = [int(re.findall(r'\d+', f)[-1]) for f in files] sort_idx = np.argsort(file_nos) self.start_no = file_nos[sort_idx[0]] return list(np.array(files)[sort_idx]) def get_shape(self): dims = list(self.image_dims) + self.frame_dim shape = [x for _, x in sorted(zip(dims, self.full_shape))] return tuple(shape) def __get_idx(self, dim, sl, shape): c = int(np.prod(shape[0:dim])) r = int(np.prod(shape[dim+1:])) vals = np.arange(sl.start, sl.stop, sl.step) vals_shift = np.arange(0, len(vals), 1) return [np.ravel(np.kron(v, np.ones((r, c)))) for v in \ [vals, vals_shift]] def __get_indices(self, index, size): """ Get the indices for the new data array and the file numbers. 
""" # indices for non-image dims only sub_idx = np.array(index)[np.array(self.frame_dim)] sub_size = [size[i] for i in self.frame_dim] idx_list = [] frame_list = [] for dim in range(len(sub_idx)): frame, idx = self.__get_idx(dim, sub_idx[dim], sub_size) frame_list.append(frame.astype(int)) idx_list.append(idx.astype(int)) lshape = idx_list[0].shape[0] # this is just size of first frame dim? sub_size[0] index = np.tile(index, (lshape, 1)) frameidx = np.zeros(lshape) for dim in range(len(sub_idx)): index[:, self.frame_dim[dim]] = \ [slice(i, i+1, 1) for i in idx_list[dim]] frameidx[:] += frame_list[dim]*np.prod(self.shape[dim+1:]) return index.tolist(), frameidx.astype(int)
py
1a49d7dbb9a60a4eb5a6394010b9fac7927a936a
import boto3 import botocore import os import io import json import time import sys from google.protobuf import text_format from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState import logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger("SageS3Client") class SageS3Client(): def __init__(self, bucket=None, s3_prefix=None, aws_region=None): self.aws_region = aws_region self.bucket = bucket self.s3_prefix = s3_prefix self.config_key = os.path.normpath(s3_prefix + "/ip/ip.json") self.markov_prefix = os.path.normpath(s3_prefix + "/markov") self.hyperparameters_key = os.path.normpath(s3_prefix + "/ip/hyperparameters.json") self.done_file_key = os.path.normpath(s3_prefix + "/ip/done") self.model_checkpoints_prefix = os.path.normpath(s3_prefix + "/model/") + "/" self.lock_file = ".lock" logger.info("Initializing SageS3Client...") def get_client(self): session = boto3.session.Session() return session.client('s3', region_name=self.aws_region) def _get_s3_key(self, key): return os.path.normpath(self.model_checkpoints_prefix + "/" + key) def download_markov(self): s3_client = self.get_client() response = s3_client.list_objects_v2(Bucket=self.bucket, Prefix=self.markov_prefix) if "Contents" in response: for i in response["Contents"]: if ".ipynb_checkpoints" in i["Key"]: continue s3_client.download_file(Bucket=self.bucket, Key=i["Key"], Filename=i["Key"].replace(self.markov_prefix,"./custom_files/markov")) logger.info("Downloaded %s" % i["Key"]) def write_ip_config(self, ip): s3_client = self.get_client() data = {"IP": ip} json_blob = json.dumps(data) file_handle = io.BytesIO(json_blob.encode()) file_handle_done = io.BytesIO(b'done') s3_client.upload_fileobj(file_handle, self.bucket, self.config_key) s3_client.upload_fileobj(file_handle_done, self.bucket, self.done_file_key) def upload_hyperparameters(self, hyperparams_json): s3_client = self.get_client() file_handle = io.BytesIO(hyperparams_json.encode()) s3_client.upload_fileobj(file_handle, self.bucket, self.hyperparameters_key) def upload_model(self, checkpoint_dir): s3_client = self.get_client() num_files = 0 for root, dirs, files in os.walk("./" + checkpoint_dir): for filename in files: abs_name = os.path.abspath(os.path.join(root, filename)) s3_client.upload_file(abs_name, self.bucket, "%s/%s/%s" % (self.s3_prefix, checkpoint_dir, filename)) num_files += 1 def download_model(self, checkpoint_dir): s3_client = self.get_client() filename = "None" try: filename = os.path.abspath(os.path.join(checkpoint_dir, "checkpoint")) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) while True: response = s3_client.list_objects_v2(Bucket=self.bucket, Prefix=self._get_s3_key(self.lock_file)) if "Contents" not in response: # If no lock is found, try getting the checkpoint try: s3_client.download_file(Bucket=self.bucket, Key=self._get_s3_key("checkpoint"), Filename=filename) except Exception as e: time.sleep(2) continue else: time.sleep(2) continue ckpt = CheckpointState() if os.path.exists(filename): contents = open(filename, 'r').read() text_format.Merge(contents, ckpt) rel_path = ckpt.model_checkpoint_path checkpoint = int(rel_path.split('_Step')[0]) response = s3_client.list_objects_v2(Bucket=self.bucket, Prefix=self._get_s3_key(rel_path)) if "Contents" in response: num_files = 0 for obj in response["Contents"]: filename = os.path.abspath(os.path.join(checkpoint_dir, obj["Key"].replace(self.model_checkpoints_prefix, ""))) s3_client.download_file(Bucket=self.bucket, Key=obj["Key"], Filename=filename) 
num_files += 1 return True except Exception as e: logger.error("{} while downloading the model {} from S3".format(e, filename)) return False def get_ip(self): s3_client = self.get_client() self._wait_for_ip_upload() try: s3_client.download_file(self.bucket, self.config_key, 'ip.json') with open("ip.json") as f: ip = json.load(f)["IP"] return ip except Exception as e: logger.error("Exception [{}] occured, Cannot fetch IP of redis server running in SageMaker. Job failed!".format(e)) sys.exit(1) def _wait_for_ip_upload(self, timeout=600): s3_client = self.get_client() time_elapsed = 0 while True: response = s3_client.list_objects(Bucket=self.bucket, Prefix=self.done_file_key) if "Contents" not in response: time.sleep(1) time_elapsed += 1 if time_elapsed % 5 == 0: logger.info ("Waiting for SageMaker Redis server IP... Time elapsed: %s seconds" % time_elapsed) if time_elapsed >= timeout: logger.error("Cannot retrieve IP of redis server running in SageMaker. Job failed!") sys.exit(1) else: return def download_file(self, s3_key, local_path): s3_client = self.get_client() try: s3_client.download_file(self.bucket, s3_key, local_path) return True except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": logger.info("Exception [{}] occured on download file-{} from s3 bucket-{} key-{}".format(e.response['Error'], local_path, self.bucket, s3_key)) return False else: logger.error("boto client exception error [{}] occured on download file-{} from s3 bucket-{} key-{}" .format(e.response['Error'], local_path, self.bucket, s3_key)) return False except Exception as e: logger.error("Exception [{}] occcured on download file-{} from s3 bucket-{} key-{}".format(e, local_path, self.bucket, s3_key)) return False def upload_file(self, s3_key, local_path): s3_client = self.get_client() try: s3_client.upload_file(Filename=local_path, Bucket=self.bucket, Key=s3_key) return True except Exception as e: logger.error("{} on upload file-{} to s3 bucket-{} key-{}".format(e, local_path, self.bucket, s3_key)) return False if __name__ == '__main__': CUSTOM_FILES_PATH = "./custom_files" dirs_to_create = ["./custom_files", "./custom_files/markov", "./custom_files/markov/actions", "./custom_files/markov/presets", "./custom_files/markov/environments", "./custom_files/markov/rewards" ] for path in dirs_to_create: if not os.path.exists(path): os.makedirs(path) s3_bucket = os.environ.get("SAGEMAKER_SHARED_S3_BUCKET", "gsaur-test") s3_prefix = os.environ.get("SAGEMAKER_SHARED_S3_PREFIX", "sagemaker") aws_region = os.environ.get("APP_REGION", "us-east-1") s3_client = SageS3Client(bucket=s3_bucket, s3_prefix=s3_prefix, aws_region=aws_region) s3_client.download_markov()
py
1a49d8a951f5371de63ebed74b407992f47caebe
from utils import run_command_and_get_output from utils import get_log_lines def test_call_many_args(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() base_cmd = [ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'echo_user', ] # all args cmd = base_cmd + [ '--username', 'Elizabeth', '--age', '23', '--loves-cakes', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 assert get_log_lines(output) == [ '• Hi, Elizabeth!', '• You are 23 years old', '• I know that you like cakes!', ] # age missed # check that default number flag value (0) isn't passed cmd = base_cmd + [ '--username', 'Elizabeth', '--loves-cakes', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 assert get_log_lines(output) == [ "• Hi, Elizabeth!", "• I don't know your age", "• I know that you like cakes!", ] # bool flag is false cmd = base_cmd + [ '--username', 'Elizabeth', '--loves-cakes=false', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 assert get_log_lines(output) == [ "• Hi, Elizabeth!", "• I don't know your age", "• How can you not love cakes?", ] def test_func_long_arg(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() cmd = [ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'func_long_arg', '--long-arg', 'some-value', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 assert get_log_lines(output) == [ '• func_long_arg was called with "some-value" arg', ] def test_func_rets_str(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() cmd = [ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'func_rets_str', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 assert get_log_lines(output) == [ '• func_rets_str was called', ] def test_func_rets_non_str(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() cmd = [ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'func_rets_non_str', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 assert get_log_lines(output) == [ '• 666', '• Admin function should return string or string array value', ] def test_func_rets_err(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() cmd = [ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'func_rets_err', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 1 assert get_log_lines(output) == [ '⨯ Failed to call "func_rets_err": Some horrible error', ] def test_func_raises_err(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() cmd = [ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'func_raises_err', ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 1 assert '⨯ Failed to call "func_raises_err":' in output assert 'Some horrible error raised' in output def test_print(cartridge_cmd, custom_admin_running_instances, tmpdir): project = custom_admin_running_instances['project'] run_dir = project.get_run_dir() ITERATIONS_NUM = 3 cmd = 
[ cartridge_cmd, 'admin', '--name', project.name, '--run-dir', run_dir, 'func_print', '--num', str(ITERATIONS_NUM), ] rc, output = run_command_and_get_output(cmd, cwd=tmpdir) assert rc == 0 iterations_output = [] for i in range(1, ITERATIONS_NUM+1): iterations_output.extend([ '• Iteration %s (printed)' % i, '• Iteration %s (pushed)' % i, ]) assert get_log_lines(output) == iterations_output + [ '• I am some great result', ]
py
1a49d92d1cfe89f89126e57234850dfc1bf2ff72
"""Support for Owlet baby monitors.""" import logging import voluptuous as vol from homeassistant.const import CONF_NAME, CONF_PASSWORD, CONF_USERNAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import load_platform from .const import ( SENSOR_BASE_STATION, SENSOR_HEART_RATE, SENSOR_MOVEMENT, SENSOR_OXYGEN_LEVEL, ) _LOGGER = logging.getLogger(__name__) DOMAIN = "owlet" SENSOR_TYPES = [ SENSOR_OXYGEN_LEVEL, SENSOR_HEART_RATE, SENSOR_BASE_STATION, SENSOR_MOVEMENT, ] CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_NAME): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Set up owlet component.""" from pyowlet.PyOwlet import PyOwlet username = config[DOMAIN][CONF_USERNAME] password = config[DOMAIN][CONF_PASSWORD] name = config[DOMAIN].get(CONF_NAME) try: device = PyOwlet(username, password) except KeyError: _LOGGER.error( "Owlet authentication failed. Please verify your " "credentials are correct" ) return False device.update_properties() if not name: name = "{}'s Owlet".format(device.baby_name) hass.data[DOMAIN] = OwletDevice(device, name, SENSOR_TYPES) load_platform(hass, "sensor", DOMAIN, {}, config) load_platform(hass, "binary_sensor", DOMAIN, {}, config) return True class OwletDevice: """Represents a configured Owlet device.""" def __init__(self, device, name, monitor): """Initialize device.""" self.name = name self.monitor = monitor self.device = device
py
1a49dabc4dead57264af0c383338ead3f94183b6
from typing import Any, Dict, List, Type, TypeVar, Union

import attr

from ..models.grupo import Grupo
from ..models.trecho import Trecho
from ..models.viagem import Viagem
from ..types import UNSET, Unset

T = TypeVar("T", bound="PurchaseEventIn")


@attr.s(auto_attribs=True)
class PurchaseEventIn:
    """
    Attributes:
        trecho (Trecho):
        grupo (Grupo):
        viagem (Viagem):
        token (Union[Unset, str]):
    """

    trecho: Trecho
    grupo: Grupo
    viagem: Viagem
    token: Union[Unset, str] = UNSET
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        trecho = self.trecho.to_dict()
        grupo = self.grupo.to_dict()
        viagem = self.viagem.to_dict()
        token = self.token

        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update(
            {
                "trecho": trecho,
                "grupo": grupo,
                "viagem": viagem,
            }
        )
        if token is not UNSET:
            field_dict["token"] = token

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        d = src_dict.copy()
        trecho = Trecho.from_dict(d.pop("trecho"))
        grupo = Grupo.from_dict(d.pop("grupo"))
        viagem = Viagem.from_dict(d.pop("viagem"))
        token = d.pop("token", UNSET)

        purchase_event_in = cls(
            trecho=trecho,
            grupo=grupo,
            viagem=viagem,
            token=token,
        )

        purchase_event_in.additional_properties = d
        return purchase_event_in

    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
py
1a49db2a1652a8bae1e75985de8f87ff74c39ebb
# =============================================================================
# Quenouille Stack Overflow Testing
# =============================================================================
#
# Reproducing issues related to recursion & stack overflow.
#
from quenouille import imap_unordered

DATA = range(3000)


def worker(i):
    return i


for i in imap_unordered(DATA, worker, 25, group=lambda x: 1, group_parallelism=1, group_throttle=0.1):
    print(i)
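For contrast, the same traversal without the grouping and throttling keyword arguments is just the call below, based on the imap_unordered signature used above (quenouille must be installed; the squaring worker is illustrative):

from quenouille import imap_unordered


def worker(i):
    return i * i


# 25 threads, no group throttling: results stream back in completion order.
for result in imap_unordered(range(100), worker, 25):
    print(result)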
py
1a49dba9625bc0f3cf8dbdefe730c0b11f91c342
COLUMNS = [
    'TIPO_REGISTRO',
    'NRO_FILIACAO_MATRIZ_OU_GRUPO_COMERCIAL',
    'NRO_RESUMO_VENDAS',
    'DT_CV',
    'VL_RECARGA',
    'NRO_COMPROVANTES',
    'NRO_TELEFONE',
    'BANDEIRA',
    'COD_AUORIZACAO'
]
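A column list like this is typically zipped against one parsed record to build a dict keyed by field name; the row values below are placeholders, not real data, and the COLUMNS list is repeated here only so the snippet runs on its own:

COLUMNS = [
    'TIPO_REGISTRO', 'NRO_FILIACAO_MATRIZ_OU_GRUPO_COMERCIAL', 'NRO_RESUMO_VENDAS',
    'DT_CV', 'VL_RECARGA', 'NRO_COMPROVANTES', 'NRO_TELEFONE', 'BANDEIRA',
    'COD_AUORIZACAO',
]

row = ['1', '0000123456', '987654', '2020-01-31', '35.00', '2', '11999990000', 'VISA', 'A1B2C3']
record = dict(zip(COLUMNS, row))
print(record['VL_RECARGA'])  # '35.00'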
py
1a49dbdde98959b36a804f070fd805b320d2792d
'''
This module has utilities for the arithmetic functions.
The parameters are of variable length.
'''

__author__ = 'vinay'
__version__ = "alpha_1"


def myvsum(*args):
    '''
    function which takes in variable count of numbers and returns their sum
    '''
    s = 0
    for n in args:
        s = s + n
    return s


def myvproduct(*args):
    '''
    function which takes in variable count of numbers and returns their product
    '''
    p = 1
    for n in args:
        p = p * n
    return p


if __name__ == '__main__':
    s = myvsum(1, 2, 3, 4, 5, 6, 7)
    print(s)
    p = myvproduct(1, 2, 3, 4, 5, 6, 7)
    print(p)
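Because both helpers take *args, an existing sequence has to be unpacked with * at the call site. The check below is a sketch that assumes it runs in the same module as the two functions (the file's import name is not shown above); it cross-checks the results against the builtins:

import math

values = [1, 2, 3, 4, 5, 6, 7]

# *values unpacks the list into separate positional arguments.
assert myvsum(*values) == sum(values) == 28
assert myvproduct(*values) == math.factorial(7) == 5040

print(myvsum(*values), myvproduct(*values))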
py
1a49dbede987b9807e6ea620180c853c16fdc466
import os
import sys
from tempfile import mkstemp

import db_utils as dbutils
import fixture_utils as fixutils
import pexpect

from steps.wrappers import run_cli, wait_prompt

test_log_file = os.path.join(os.environ['HOME'], '.mycli.test.log')


def before_all(context):
    """Set env parameters."""
    os.environ['LINES'] = "100"
    os.environ['COLUMNS'] = "100"
    os.environ['EDITOR'] = 'ex'
    os.environ['LC_ALL'] = 'en_US.utf8'
    os.environ['PROMPT_TOOLKIT_NO_CPR'] = '1'

    test_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    login_path_file = os.path.join(test_dir, 'mylogin.cnf')
    os.environ['MYSQL_TEST_LOGIN_FILE'] = login_path_file

    context.package_root = os.path.abspath(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

    os.environ["COVERAGE_PROCESS_START"] = os.path.join(context.package_root,
                                                        '.coveragerc')

    context.exit_sent = False

    vi = '_'.join([str(x) for x in sys.version_info[:3]])
    db_name = context.config.userdata.get(
        'my_test_db', None) or "mycli_behave_tests"
    db_name_full = '{0}_{1}'.format(db_name, vi)

    # Store get params from config/environment variables
    context.conf = {
        'host': context.config.userdata.get(
            'my_test_host',
            os.getenv('PYTEST_HOST', 'localhost')
        ),
        'user': context.config.userdata.get(
            'my_test_user',
            os.getenv('PYTEST_USER', 'root')
        ),
        'pass': context.config.userdata.get(
            'my_test_pass',
            os.getenv('PYTEST_PASSWORD', None)
        ),
        'cli_command': context.config.userdata.get(
            'my_cli_command', None) or
            sys.executable +
            ' -c "import coverage ; coverage.process_startup(); import mycli.main; mycli.main.cli()"',
        'dbname': db_name,
        'dbname_tmp': db_name_full + '_tmp',
        'vi': vi,
        'pager_boundary': '---boundary---',
    }

    _, my_cnf = mkstemp()
    with open(my_cnf, 'w') as f:
        f.write(
            '[client]\n'
            'pager={0} {1} {2}\n'.format(
                sys.executable,
                os.path.join(context.package_root,
                             'test/features/wrappager.py'),
                context.conf['pager_boundary'])
        )
    context.conf['defaults-file'] = my_cnf
    context.conf['myclirc'] = os.path.join(context.package_root, 'test',
                                           'myclirc')

    context.cn = dbutils.create_db(context.conf['host'], context.conf['user'],
                                   context.conf['pass'],
                                   context.conf['dbname'])

    context.fixture_data = fixutils.read_fixture_files()


def after_all(context):
    """Unset env parameters."""
    dbutils.close_cn(context.cn)
    dbutils.drop_db(context.conf['host'], context.conf['user'],
                    context.conf['pass'], context.conf['dbname'])

    # Restore env vars.
    #for k, v in context.pgenv.items():
    #    if k in os.environ and v is None:
    #        del os.environ[k]
    #    elif v:
    #        os.environ[k] = v


def before_step(context, _):
    context.atprompt = False


def before_scenario(context, _):
    with open(test_log_file, 'w') as f:
        f.write('')
    run_cli(context)
    wait_prompt(context)


def after_scenario(context, _):
    """Cleans up after each test complete."""
    with open(test_log_file) as f:
        for line in f:
            if 'error' in line.lower():
                raise RuntimeError(f'Error in log file: {line}')

    if hasattr(context, 'cli') and not context.exit_sent:
        # Quit nicely.
        if not context.atprompt:
            user = context.conf['user']
            host = context.conf['host']
            dbname = context.currentdb
            context.cli.expect_exact(
                '{0}@{1}:{2}> '.format(
                    user,
                    host,
                    dbname
                ),
                timeout=5
            )
        context.cli.sendcontrol('d')
        context.cli.expect_exact(pexpect.EOF, timeout=5)

# TODO: uncomment to debug a failure
# def after_step(context, step):
#     if step.status == "failed":
#         import ipdb; ipdb.set_trace()
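The host, user, and password lookups in before_all all follow the same fallback: a behave -D userdata override wins, then an environment variable, then a hard-coded default. In isolation the pattern is just the small sketch below (the userdata dict and host value are hypothetical):

import os


def setting(userdata, key, env_var, default):
    # behave's -D flags win, then the environment, then the hard-coded default.
    return userdata.get(key, os.getenv(env_var, default))


userdata = {"my_test_host": "db.example.test"}   # stand-in for a -D my_test_host=... flag
print(setting(userdata, "my_test_host", "PYTEST_HOST", "localhost"))  # db.example.test
print(setting(userdata, "my_test_user", "PYTEST_USER", "root"))       # root, if PYTEST_USER is unset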
py
1a49dc49a8a22f5975ba4f918189e779bedeee5e
import logging from typing import List, Optional, Sequence import telebot from quiz_bot.entity import ( AnswerEvaluation, AnyChallengeInfo, ChallengeSettings, CheckedResult, ContextChallenge, ContextParticipant, ContextUser, EvaluationStatus, PictureModel, QuizState, RegularChallengeInfo, UnexpectedChallengeAmountError, ) from quiz_bot.quiz.errors import ChallengeNotFoundError, NullableParticipantError from quiz_bot.quiz.keeper import ChallengeKeeper from quiz_bot.quiz.registrar import Registrar from quiz_bot.storage import IChallengeStorage logger = logging.getLogger(__name__) class ChallengeMaster: def __init__( self, storage: IChallengeStorage, settings: ChallengeSettings, registrar: Registrar, keeper: ChallengeKeeper, ) -> None: self._storage = storage self._settings = settings self._registrar = registrar self._keeper = keeper self._sync_challenge() if all((self._settings.autostart, not self._keeper.has_data or self._keeper.finished,)): self.start_next_challenge() @property def keeper(self) -> ChallengeKeeper: return self._keeper @property def _is_last_challenge(self) -> bool: return self._keeper.has_data and self._keeper.number == self._settings.challenge_amount def _save_challenge_data(self, challenge: ContextChallenge) -> None: self._keeper.set(data=challenge, info=self._settings.get_challenge_model(challenge.id)) def _sync_challenge(self) -> None: actual_challenge = self._storage.get_actual_challenge() if actual_challenge is not None: logger.info("Actual challenge with ID %s", actual_challenge.id) self._save_challenge_data(actual_challenge) return finished_challenge_ids = self._storage.get_finished_challenge_ids() if not finished_challenge_ids: logger.info("Quiz has not been running yet.") return if len(finished_challenge_ids) > self._settings.challenge_amount: raise UnexpectedChallengeAmountError( f"Not equal challenge amount: expected {self._settings.challenge_amount}, " f"got {len(finished_challenge_ids)} finished challenges!" ) logger.info("Quiz is not running now. 
Finished challenges: %s", finished_challenge_ids) challenge = self._storage.get_challenge(finished_challenge_ids[-1]) if challenge is None: raise ChallengeNotFoundError("Could not found finished challenge - WTF?") self._save_challenge_data(challenge) def resolve_quiz_state(self) -> QuizState: self._sync_challenge() if not self._keeper.has_data: return QuizState.NEW if self._keeper.finished: if self._is_last_challenge: return QuizState.FINISHED return QuizState.WAIT_NEXT return QuizState.IN_PROGRESS def _get_next_challenge_info(self) -> AnyChallengeInfo: if not self._keeper.has_data: return self._settings.get_challenge_model(1) return self._settings.get_challenge_model(self._keeper.number + 1) def start_next_challenge(self) -> None: next_challenge_info = self._get_next_challenge_info() next_challenge = self._storage.create_challenge( name=next_challenge_info.name, phase_amount=next_challenge_info.phase_amount, winner_amount=next_challenge_info.max_winners, duration=next_challenge_info.duration, ) logger.info("Next challenge: %s", next_challenge) self._sync_challenge() def _get_evaluation( self, status: EvaluationStatus, replies: Optional[Sequence[str]] = (), picture: Optional[PictureModel] = None ) -> AnswerEvaluation: return AnswerEvaluation(status=status, replies=replies, quiz_state=self.resolve_quiz_state(), picture=picture) def start_challenge_for_user( self, user: ContextUser, status: EvaluationStatus = EvaluationStatus.NOT_CHECKED, additional_replies: Sequence[str] = (), ) -> AnswerEvaluation: participant = self._registrar.get_participation_for_user(user=user, challenge=self._keeper.data) if participant is None: participant = self._registrar.create_participation_for_user(user=user, challenge=self._keeper.data) result = self._keeper.checker.create_initial_phase(participant=participant) logger.warning("Started challenge ID %s for user @%s", self._keeper.number, user.nick_name) replies = list(additional_replies) + [ self._settings.get_start_notification( challenge_num=self._keeper.number, challenge_name=self._keeper.info.name, description=f"{self._keeper.info.description}", ), ] if isinstance(self._keeper.info, RegularChallengeInfo): replies.append( self._settings.get_next_answer_notification( question=self._keeper.info.get_question(result.phase), question_num=result.phase, ) ) return self._get_evaluation(status=status, replies=replies, picture=self._keeper.info.picture,) return self._get_evaluation( status=status, replies=[self._settings.get_already_started_notification(challenge_name=self._keeper.info.name)], ) def _resolve_next_event(self, participant: ContextParticipant, result: CheckedResult) -> AnswerEvaluation: status = EvaluationStatus.CORRECT if result.next_phase is not None: replies: List[str] = [] if isinstance(self._keeper.info, RegularChallengeInfo): replies.append( self._settings.get_next_answer_notification( question=self._keeper.info.get_question(result.next_phase), question_num=result.next_phase, ) ) return self._get_evaluation(status=status, replies=replies) self._registrar.finish_participation(participant) pretender_replies = [ self._settings.get_pretender_notification( challenge_name=self._keeper.info.name, scores=participant.scores, finished_at=participant.finished_at, ) ] has_all_winners = self._registrar.all_winners_exist(challenge=self._keeper.data) if not has_all_winners: return self._get_evaluation(status=status, replies=pretender_replies) self._storage.finish_actual_challenge() logger.info( "Challenge #%s '%s' finished with all winners resolution!", 
self._keeper.number, self._keeper.info.name, ) if not self._is_last_challenge and self._settings.autostart: self.start_next_challenge() return self.start_challenge_for_user( user=participant.user, status=status, additional_replies=pretender_replies ) return self._get_evaluation(status=status, replies=pretender_replies) def evaluate(self, user: ContextUser, message: telebot.types.Message) -> AnswerEvaluation: # noqa: C901 if self._keeper.out_of_date: self._storage.finish_actual_challenge() participant = self._registrar.get_participation_for_user(user=user, challenge=self._keeper.data) if participant is None: logger.info( "User @%s is not a Participant for challenge with ID %s!", user.nick_name, self._keeper.data.id, ) return self._get_evaluation(status=EvaluationStatus.NOT_CHECKED) if participant.completed_challenge: logger.info( "User @%s has already completed challenge with ID %s!", user.nick_name, self._keeper.data.id, ) return self._get_evaluation(status=EvaluationStatus.ALREADY_COMPLETED) if self._keeper.finished: return self._get_evaluation(status=EvaluationStatus.INCORRECT) checked_result = self._keeper.checker.check_answer( participant=participant, data=self._keeper.data, info=self._keeper.info, message=message # type: ignore ) if not checked_result.correct: return self._get_evaluation(status=EvaluationStatus.INCORRECT) self._registrar.add_correct_answer(participant) logger.info("Added +1 score for user '%s'!", user.nick_name) return self._resolve_next_event(participant=participant, result=checked_result) def skip_evaluation(self, user: ContextUser) -> AnswerEvaluation: participant = self._registrar.get_participation_for_user(user=user, challenge=self._keeper.data) if participant is None: raise NullableParticipantError unchecked_result = self._keeper.checker.skip_question(participant=participant, data=self._keeper.data) return self._resolve_next_event(participant=participant, result=unchecked_result) def get_challenge_info(self, challenge_id: Optional[int] = None) -> str: if not isinstance(challenge_id, int): logger.info("Challenge ID was not specified, so use current challenge information.") challenge_id = self._keeper.number context_challenge = self._storage.get_challenge(challenge_id) if context_challenge is None: raise ChallengeNotFoundError(f"Challenge with ID {challenge_id} was not found!") self._save_challenge_data(context_challenge) winner_results = self._registrar.get_winners(self._keeper.data) if not self._keeper.finished: results = self._settings.get_time_left_info(self._keeper.finish_after) else: results = ( "\n".join(self._settings.get_results_info(winner_results)) + "\n\n" + self._settings.get_time_over_info(self._keeper.data) ) return self._settings.get_challenge_info(number=self._keeper.number, info=self._keeper.info, results=results)
py
1a49dd98349a63caa357da76727d6458aa82a0b5
#!/usr/bin/python3

# Copyright (c) 2016, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Master component entry point."""

from zoe_master.entrypoint import main

if __name__ == '__main__':
    main()
py
1a49e11a67a6fd6d7ba08c38c43b11a9d11ebc54
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# Resource Constants
RESOURCE_ADMIN_MENU = "Admin"
RESOURCE_AIRFLOW = "Airflow"
RESOURCE_AUDIT_LOG = "Audit Logs"
RESOURCE_BROWSE_MENU = "Browse"
RESOURCE_DAG = "DAGs"
RESOURCE_DAG_PREFIX = "DAG:"
RESOURCE_DOCS_MENU = "Docs"
RESOURCE_DOCS = "Documentation"
RESOURCE_CONFIG = "Configurations"
RESOURCE_CONNECTION = "Connections"
RESOURCE_DAG_CODE = "DAG Code"
RESOURCE_DAG_RUN = "DAG Runs"
RESOURCE_IMPORT_ERROR = "ImportError"
RESOURCE_JOB = "Jobs"
RESOURCE_POOL = "Pools"
RESOURCE_PLUGIN = "Plugins"
RESOURCE_SLA_MISS = "SLA Misses"
RESOURCE_TASK_INSTANCE = "Task Instances"
RESOURCE_TASK_LOG = "Task Logs"
RESOURCE_TASK_RESCHEDULE = "Task Reschedules"
RESOURCE_VARIABLE = "Variables"
RESOURCE_WEBSITE = "Website"
RESOURCE_XCOM = "XComs"
RESOURCE_USERINFO_EDIT_VIEW = "UserInfoEditView"
RESOURCE_RESET_MY_PASSWORD_VIEW = "ResetMyPasswordView"
RESOURCE_USER_DB_MODELVIEW = "UserDBModelView"
RESOURCE_USER_OID_MODELVIEW = "UserOIDModelView"
RESOURCE_USER_LDAP_MODELVIEW = "UserLDAPModelView"
RESOURCE_USER_OAUTH_MODELVIEW = "UserOAuthModelView"
RESOURCE_USER_REMOTEUSER_MODELVIEW = "UserRemoteUserModelView"
RESOURCE_ROLE_MODEL_VIEW = "RoleModelView"
RESOURCE_PERMISSION_MODEL_VIEW = "PermissionModelView"

# Action Constants
ACTION_CAN_LIST = "can_list"
ACTION_CAN_SHOW = "can_show"
ACTION_CAN_CREATE = "can_create"
ACTION_CAN_READ = "can_read"
ACTION_CAN_EDIT = "can_edit"
ACTION_CAN_DELETE = "can_delete"
ACTION_CAN_ACCESS_MENU = "menu_access"
ACTION_CAN_THIS_FORM_GET = "can_this_form_get"
ACTION_CAN_THIS_FORM_POST = "can_this_form_post"
ACTION_RESETMYPASSWORD = "resetmypassword"
ACTION_CAN_USERINFO = "can_userinfo"
ACTION_USERINFOEDIT = "userinfoedit"

DEPRECATED_ACTION_CAN_DAG_READ = "can_dag_read"
DEPRECATED_ACTION_CAN_DAG_EDIT = "can_dag_edit"
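Per-DAG resources are built by joining RESOURCE_DAG_PREFIX with a dag_id, and access checks pair one action with one resource. The sketch below is a hypothetical illustration (not Airflow's own API) and assumes the constants above are in scope, e.g. appended to the same module:

def resource_name_for_dag(dag_id: str) -> str:
    # e.g. "DAG:example_bash_operator"
    return "{}{}".format(RESOURCE_DAG_PREFIX, dag_id)


# A role's permissions are commonly modelled as (action, resource) pairs.
viewer_perms = {
    (ACTION_CAN_READ, RESOURCE_DAG),
    (ACTION_CAN_READ, resource_name_for_dag("example_bash_operator")),
}
print((ACTION_CAN_READ, "DAG:example_bash_operator") in viewer_perms)  # True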
py
1a49e1482b4d2fd0c35a2a319afc12cc6df302c8
import pytest from oh import Config, ParseError def test_interpolation(): with pytest.raises(ParseError): Config.from_str( """ [a] foo = "hello" [b] bar = ${foo} """ ) c = Config.from_str( """ [a] foo = "hello" [b] bar = ${a.foo} """ ) assert c["b"]["bar"] == "hello" c = Config.from_str( """ [a] foo = "hello" [b] bar = "${a.foo}!" """ ) assert c["b"]["bar"] == "hello!" with pytest.raises(ParseError): Config.from_str( """ [a] foo = "hello" [b] bar = ${a.foo}! """ ) with pytest.raises(ParseError): Config.from_str( """ [a] foo = 15 [b] bar = ${a.foo}! """ ) c = Config.from_str( """ [a] foo = ["x", "y"] [b] bar = ${a.foo} """ ) assert c["b"]["bar"] == ["x", "y"] # Interpolation within the same section c = Config.from_str( """ [a] foo = "x" bar = ${a.foo} baz = "${a.foo}y" """ ) assert c["a"]["bar"] == "x" assert c["a"]["baz"] == "xy" # multiple string interpolations c = Config.from_str( """ [a] x = "x" y = 1 z = 3.14159 zz = "foo ${a.x} ${a.y} ${a.z}" """ ) assert c.a.zz == "foo x 1 3.14159" # test all types c = Config.from_str( """ [a] int = 42 float = 3.14159 str = "foobar" bool = true null = null x = "${a.int} ${a.float} ${a.str} ${a.bool} ${a.null}" y = {"a": ${a.int}, "b": ${a.float}, "c": ${a.str}, "d": ${a.bool}, "e": ${a.null}} z = [${a.int}, ${a.float}, ${a.str}, ${a.bool}, ${a.null}] """ ) assert c.a.x == "42 3.14159 foobar True None" assert c.a.y == {"a": 42, "b": 3.14159, "c": "foobar", "d": True, "e": None} assert c.a.z == [42, 3.14159, "foobar", True, None] # leading and trailing text c = Config.from_str( """ [a] b = "ergo" c = "cogito ${a.b} sum" """ ) assert c.a.c == "cogito ergo sum" # trailing spaces c = Config.from_str( """ [a] b = "zip" c = ${a.b} """ ) assert c.a.c == "zip" c = Config.from_str( """ [a] b = "zip" c = "${a.b}${a.b}" """ ) assert c.a.c == "zipzip" with pytest.raises(ParseError): Config.from_str( """ [a] b = "zip" c = ${a.b}${a.b} """ ) # nested data in string interpolation with pytest.raises(ParseError): Config.from_str( """ [a] b = 1 c = "fof" [d] e = "${a}" """ ) # wrong order with pytest.raises(ParseError): Config.from_str( """ [a] b = ${d} [d] e = 1 """ ) # cyclic references with pytest.raises(ParseError): Config.from_str( """ [a] b = ${d.e} [d] e = ${a.b} """ ) # chained references c = Config.from_str( """ [a] b = 1 [c] d = ${a} [e] f = ${c.d.b} [g] h = ${e.f} """ ) assert c.flat["a.b"] == 1 assert c.flat["c.d"] == {"b": 1} assert c.flat["e.f"] == 1 assert c.flat["g.h"] == 1 def test_interpolation_lists(): """Test that lists are preserved correctly""" c = Config.from_str( """ [a] b = 1 [c] d = ["hello ${a.b}", "world"] """, interpolate=False ) assert c["c"]["d"] == ["hello ${a.b}", "world"] c = Config.from_str( """ [a] b = 1 [c] d = ["hello ${a.b}", "world"] """ ) assert c["c"]["d"] == ["hello 1", "world"] c = Config.from_str( """ [a] b = 1 [c] d = [${a.b}, "hello ${a.b}", "world"] """, interpolate=False ) assert c["c"]["d"] == [{"@ref": "a.b"}, "hello ${a.b}", "world"] c = Config.from_str( """ [a] b = 1 [c] d = [${a.b}, "hello ${a.b}", "world"] """ ) assert c["c"]["d"] == [1, "hello 1", "world"] c = Config.from_str( """ [a] b = 1 [c] d = ["hello", ${a}] """ ) assert c["c"]["d"] == ["hello", {"b": 1}] with pytest.raises(ParseError): Config.from_str( """ [a] b = 1 [c] d = ["hello", "hello ${a}"] """ ) c = Config.from_str( """ [a] b = 1 [c] d = ["hello", {"x": ["hello ${a.b}"], "y": 2}] """ ) assert c["c"]["d"] == ["hello", {"x": ["hello 1"], "y": 2}] c = Config.from_str( """ [a] b = 1 [c] d = ["hello", {"x": [${a.b}], "y": 2}] """ ) assert c["c"]["d"] == 
["hello", {"x": [1], "y": 2}] c = Config.from_str( """ [a] b = 1 c = "fof" [d] e = ${a} """ ) assert c.d.e == {"b": 1, "c": "fof"} def test_no_interpolation(): """Test that interpolation is correctly preserved.""" c = Config.from_str( """ [a] b = 1 [c] d = "${a.b}" e = "hello${a.b}" f = ${a} """, interpolate=False ) assert c["c"]["d"] == "${a.b}" assert c["c"]["e"] == "hello${a.b}" assert c["c"]["f"] == {"@ref": "a"} d = Config.from_str(c.to_str(), interpolate=True) assert d["c"]["d"] == "1" assert d["c"]["e"] == "hello1" assert d["c"]["f"] == {"b": 1} c = Config.from_str( """ [a] b = 1 [c.d] @ref = a """ ) assert c.flat["a.b"] == 1 assert c.flat["c.d"] == {"@ref": "a"} d = Config.from_str(c.to_str(), interpolate=True) assert d.flat["a.b"] == 1 assert d.flat["c.d"] == {"b": 1}
py
1a49e2d6ef66e2e01e450a11627c38e4c8588c88
#!/usr/bin/env python3 # Copyright 2009-2017 BHG http://bw.org/ def main(): seq = range(11) seq2 = [x * 2 for x in seq] seq3 = [x for x in seq if x % 3 != 0] # we get only the elements that are not divisible by 3 seq4 = [(x, x ** 2) for x in seq] print_list(seq) print_list(seq2) print_list(seq3) print_list(seq4) from math import pi seq5 = [round(pi, i) for i in seq] print_list(seq5) # can also create a dictionary seq6 = {x: x**2 for x in seq} print(seq6) # can also create a set seq7 = {x for x in 'superduper' if x not in 'pd'} print_list(seq7) def print_list(o): for x in o: print(x, end=' ') print() if __name__ == '__main__': main()
py
1a49e3083d2b9ea5547e9f9a1f53dcab3f8e5695
# Copyright The Linux Foundation and each contributor to CommunityBridge. # SPDX-License-Identifier: MIT import pytest import cla import pynamodb from unittest.mock import Mock, patch, MagicMock from cla.models.dynamo_models import GitHubOrg, GitHubOrgModel from cla.utils import get_github_organization_instance from cla.tests.unit.data import GH_TABLE_DESCRIPTION PATCH_METHOD = "pynamodb.connection.Connection._make_api_call" @pytest.fixture() def gh_instance(): """ GitHubOrg instance """ with patch(PATCH_METHOD) as req: req.return_value = GH_TABLE_DESCRIPTION gh_org = cla.utils.get_github_organization_instance() gh_name = "FOO" gh_org.set_organization_name(gh_name) gh_org.set_organization_sfid("foo_sf_id") gh_org.set_project_sfid("foo_sf_id") gh_org.save() yield gh_org def test_set_organization_name(gh_instance): """ Test setting GitHub org name #1126 """ assert gh_instance.get_organization_name_lower() == "foo" def test_get_org_by_name_lower(gh_instance): """ Test getting GitHub org with case insensitive search """ gh_org = cla.utils.get_github_organization_instance() gh_org.model.scan = Mock(return_value=[gh_instance.model]) found_gh_org = gh_org.get_organization_by_lower_name(gh_instance.get_organization_name()) assert found_gh_org.get_organization_name_lower() == gh_instance.get_organization_name_lower()
py
1a49e40e9fefce45f42850fcc21ce87b1f2f7317
""" Test pickling of c-wrappers """ from csb.io import load, dump from isdhic import NBList resolution = 500 with open('../scripts/chrX_cell1_{}kb.py'.format(resolution)) as script: exec script forcefield = posterior['tsallis'].forcefield nblist = forcefield.nblist dump(nblist, '/tmp/nblist.pkl') nblist2 = load('/tmp/nblist.pkl') print nblist.ctype.update(posterior.params['coordinates'].get().reshape(-1,3),1) print nblist2.ctype.update(posterior.params['coordinates'].get().reshape(-1,3),1) dump(forcefield, '/tmp/forcefield.pkl') forcefield2 = load('/tmp/forcefield.pkl') print forcefield.energy(posterior.params['coordinates'].get().reshape(-1,3)) print forcefield2.energy(posterior.params['coordinates'].get().reshape(-1,3)) posterior['tsallis'].q = 1.03 posterior['rog'].beta = 0.2 posterior['contacts'].beta = 0.2 dump(posterior, '/tmp/posterior.pkl') posterior2 = load('/tmp/posterior.pkl') for p in posterior: print p.name, posterior[p.name].log_prob(), posterior2[p.name].log_prob()
py
1a49e45fccbac224324040d23eaa3e598e358271
# Generated by Django 3.2.5 on 2021-07-29 20:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('trace_cockpit', '0004_traceconfig_max_event_count_per_module'), ] operations = [ migrations.AddField( model_name='tracelog', name='http_method', field=models.CharField(default='', max_length=1024), ), ]
py
1a49e5fe07248f4cdf4ee33ffa8461be58432ceb
'''5. WAP to input a list and arrange the list in ascending order with bubble sort'''
l = eval(input("Enter the list: "))
for j in range(len(l)):
    for i in range(len(l) - 1):
        if l[i] > l[i + 1]:
            l[i + 1], l[i] = l[i], l[i + 1]
print(l)
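
# --- Illustrative variant (not part of the original exercise) ----------------
# The same bubble-sort pass structure, written as a function with an early-exit
# flag so the loop stops once a full pass makes no swaps. The name `nums` and
# the sample call below are arbitrary stand-ins.
def bubble_sort(nums):
    for j in range(len(nums) - 1):
        swapped = False
        for i in range(len(nums) - 1 - j):
            if nums[i] > nums[i + 1]:
                nums[i + 1], nums[i] = nums[i], nums[i + 1]
                swapped = True
        if not swapped:  # list already sorted, remaining passes are unnecessary
            break
    return nums

# Example: bubble_sort([5, 1, 4, 2, 8]) -> [1, 2, 4, 5, 8]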
py
1a49e6ea63b9ab131cde4ff21c9afb16a65c3d3c
# messageBox.py import ctypes user_handle = ctypes.WinDLL("User32.dll") # Handle to User32.dll kernel_handle = ctypes.WinDLL("kernel32.dll") # Handle to Kernel32.dll # WinAPI: MessageBoxW hWnd = None lpText = "Message Box" lpCaption = "Pop Up" uType = 0x00000001 response = user_handle.MessageBoxW(hWnd, lpText, lpCaption, uType) # Error Handling error = kernel_handle.GetLastError() if error != 0: print("[-] Error Code: {0}".format(error)) if response == 1: print("[+] User Clicked OK") elif response == 2: print("[+] User Clicked CANCEL")
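
# --- Illustrative notes and variant (not part of the original script) --------
# uType 0x00000001 corresponds to MB_OKCANCEL, and the return values 1 and 2 to
# IDOK and IDCANCEL. Note that calling GetLastError() through a separate WinDLL
# handle can report a stale code; ctypes.get_last_error() together with
# ctypes.WinDLL("User32.dll", use_last_error=True) is the more reliable pattern.
# A minimal Yes/No variant along the same lines (Windows only):
MB_YESNO = 0x00000004
IDYES, IDNO = 6, 7

yn_response = user_handle.MessageBoxW(None, "Continue?", "Question", MB_YESNO)

if yn_response == IDYES:
    print("[+] User Clicked YES")
elif yn_response == IDNO:
    print("[+] User Clicked NO")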
py
1a49e73d33b8bd7199e84646aedfe4c0410fdde1
class ServiceAuthError(Exception): pass class NeedLogin(Exception): pass
py
1a49e772474f05d11296b6857de06647bba9ed56
import numpy as np import utils import copy import sys sys.path.append("../utils/") import stats import pandas as pd class CircularWorld(): def __init__(self, noise_location): self.shape = 'circle' if noise_location is not None: self.centers = np.array(pd.read_csv(noise_location)) self.tick_frequency = 125 self.ticks_per_sec = 1000/125 self.us_min_wage_per_tick = 7.25 / (60*60*(1000 / self.tick_frequency)) self.round_length = 0.5 self.max_bonus = 1.25 self.game_length = int(self.round_length * 60 * self.ticks_per_sec) self.min_speed = 17 / float(self.ticks_per_sec) self.max_speed = 57 / float(self.ticks_per_sec) self.pos_limits = { "radius": 207.9098 } self.center_radius = 20 def get_random_position(self): """ >>> w = CircularWorld('../test/') >>> len(w.get_random_position()) 2 """ return stats.random_circle(self.pos_limits['radius'], 1)[0] def get_random_angle(self): """ >>> np.random.seed(1) >>> w = CircularWorld('../test/') >>> np.max([w.get_random_angle() for i in range(10000)]) 359.0 >>> np.min([w.get_random_angle() for i in range(10000)]) 0.0 """ return np.floor(np.random.random() * 360) def get_score(self, pos, time): """ >>> w = CircularWorld('../test/') >>> w.get_score(np.array([2.1,2.9]), 0) 0.0 """ return utils.calculate_score(pos, self.centers[time], self.center_radius, self.pos_limits) if __name__ == "__main__": import doctest doctest.testmod()
py
1a49e81e4f51329c0968e04bc45d70cbb9c2b267
# -*- coding: utf-8 -*- # Copyright (c) 2019 - 2020 Simon Kern # Copyright (c) 2015 - 2020 Holger Nahrstaedt # Copyright (c) 2011, 2015, Chris Lee-Messer # Copyright (c) 2016-2017 The pyedflib Developers # <https://github.com/holgern/pyedflib> # See LICENSE for license details. import numpy as np import sys from datetime import datetime, date from ._extensions._pyedflib import FILETYPE_EDFPLUS, FILETYPE_BDFPLUS, FILETYPE_BDF, FILETYPE_EDF from ._extensions._pyedflib import open_file_writeonly, set_physical_maximum, set_patient_additional, set_digital_maximum from ._extensions._pyedflib import set_birthdate, set_digital_minimum, set_technician, set_recording_additional, set_patientname from ._extensions._pyedflib import set_patientcode, set_equipment, set_admincode, set_gender, set_datarecord_duration, set_number_of_annotation_signals from ._extensions._pyedflib import set_startdatetime, set_starttime_subsecond, set_samplefrequency, set_physical_minimum, set_label, set_physical_dimension from ._extensions._pyedflib import set_transducer, set_prefilter, write_physical_samples, close_file, write_annotation_latin1, write_annotation_utf8 from ._extensions._pyedflib import blockwrite_physical_samples, write_errors, blockwrite_digital_samples, write_digital_short_samples, write_digital_samples, blockwrite_digital_short_samples __all__ = ['EdfWriter'] def u(x): return x.decode("utf-8", "strict") def du(x): if isbytestr(x): return x else: return x.encode("utf-8") def isstr(s): try: return isinstance(s, basestring) except NameError: return isinstance(s, str) def isbytestr(s): return isinstance(s, bytes) def gender2int(gender): if isinstance(gender, int) or gender is None: return gender elif gender.lower() in ['', 'x', 'xx', 'xxx', 'unknown', '?', '??']: return None elif gender.lower() in ["female", "woman", "f", "w"]: return 0 elif gender.lower() in ["male", "man", "m"]: return 1 else: raise ValueError("Unknown gender: '{}'".format(gender)) class ChannelDoesNotExist(Exception): def __init__(self, value): self.parameter = value def __str__(self): return repr(self.parameter) class WrongInputSize(Exception): def __init__(self, value): self.parameter = value def __str__(self): return repr(self.parameter) class EdfWriter(object): def __exit__(self, exc_type, exc_val, ex_tb): self.close() def __enter__(self): return self # return self def __del__(self): self.close() def __init__(self, file_name, n_channels, file_type=FILETYPE_EDFPLUS): """Initialises an EDF file at file_name. file_type is one of edflib.FILETYPE_EDFPLUS edflib.FILETYPE_BDFPLUS n_channels is the number of channels without the annotation channel channel_info should be a list of dicts, one for each channel in the data. 
Each dict needs these values: 'label' : channel label (string, <= 16 characters, must be unique) 'dimension' : physical dimension (e.g., mV) (string, <= 8 characters) 'sample_rate' : sample frequency in hertz (int) 'physical_max' : maximum physical value (float) 'physical_min' : minimum physical value (float) 'digital_max' : maximum digital value (int, -2**15 <= x < 2**15) 'digital_min' : minimum digital value (int, -2**15 <= x < 2**15) """ self.path = file_name self.file_type = file_type self.patient_name = '' self.patient_code = '' self.technician = '' self.equipment = '' self.recording_additional = '' self.patient_additional = '' self.admincode = '' self.gender = None self.recording_start_time = datetime.now().replace(microsecond=0) self.birthdate = '' self.duration = 1 self.number_of_annotations = 1 if file_type in [FILETYPE_EDFPLUS, FILETYPE_BDFPLUS] else 0 self.n_channels = n_channels self.channels = [] self.sample_buffer = [] for i in np.arange(self.n_channels): if self.file_type == FILETYPE_BDFPLUS or self.file_type == FILETYPE_BDF: self.channels.append({'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100, 'physical_max': 1.0, 'physical_min': -1.0, 'digital_max': 8388607,'digital_min': -8388608, 'prefilter': 'pre1', 'transducer': 'trans1'}) elif self.file_type == FILETYPE_EDFPLUS or self.file_type == FILETYPE_EDF: self.channels.append({'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100, 'physical_max': 1.0, 'physical_min': -1.0, 'digital_max': 32767, 'digital_min': -32768, 'prefilter': 'pre1', 'transducer': 'trans1'}) self.sample_buffer.append([]) self.handle = open_file_writeonly(self.path, self.file_type, self.n_channels) if (self.handle < 0): raise IOError(write_errors[self.handle]) def update_header(self): """ Updates header to edffile struct """ set_technician(self.handle, du(self.technician)) set_recording_additional(self.handle, du(self.recording_additional)) set_patientname(self.handle, du(self.patient_name)) set_patientcode(self.handle, du(self.patient_code)) set_patient_additional(self.handle, du(self.patient_additional)) set_equipment(self.handle, du(self.equipment)) set_admincode(self.handle, du(self.admincode)) set_gender(self.handle, gender2int(self.gender)) set_datarecord_duration(self.handle, self.duration) set_number_of_annotation_signals(self.handle, self.number_of_annotations) set_startdatetime(self.handle, self.recording_start_time.year, self.recording_start_time.month, self.recording_start_time.day, self.recording_start_time.hour, self.recording_start_time.minute, self.recording_start_time.second) # subseconds are noted in nanoseconds, so we multiply by 100 if self.recording_start_time.microsecond>0: set_starttime_subsecond(self.handle, self.recording_start_time.microsecond*100) if isstr(self.birthdate): if self.birthdate != '': birthday = datetime.strptime(self.birthdate, '%d %b %Y').date() set_birthdate(self.handle, birthday.year, birthday.month, birthday.day) else: set_birthdate(self.handle, self.birthdate.year, self.birthdate.month, self.birthdate.day) for i in np.arange(self.n_channels): set_samplefrequency(self.handle, i, self.channels[i]['sample_rate']) set_physical_maximum(self.handle, i, self.channels[i]['physical_max']) set_physical_minimum(self.handle, i, self.channels[i]['physical_min']) set_digital_maximum(self.handle, i, self.channels[i]['digital_max']) set_digital_minimum(self.handle, i, self.channels[i]['digital_min']) set_label(self.handle, i, du(self.channels[i]['label'])) set_physical_dimension(self.handle, i, 
du(self.channels[i]['dimension'])) set_transducer(self.handle, i, du(self.channels[i]['transducer'])) set_prefilter(self.handle, i, du(self.channels[i]['prefilter'])) def setHeader(self, fileHeader): """ Sets the file header """ self.technician = fileHeader["technician"] self.recording_additional = fileHeader["recording_additional"] self.patient_name = fileHeader["patientname"] self.patient_additional = fileHeader["patient_additional"] self.patient_code = fileHeader["patientcode"] self.equipment = fileHeader["equipment"] self.admincode = fileHeader["admincode"] self.gender = fileHeader["gender"] self.recording_start_time = fileHeader["startdate"] self.birthdate = fileHeader["birthdate"] self.update_header() def setSignalHeader(self, edfsignal, channel_info): """ Sets the parameter for signal edfsignal. channel_info should be a dict with these values: 'label' : channel label (string, <= 16 characters, must be unique) 'dimension' : physical dimension (e.g., mV) (string, <= 8 characters) 'sample_rate' : sample frequency in hertz (int) 'physical_max' : maximum physical value (float) 'physical_min' : minimum physical value (float) 'digital_max' : maximum digital value (int, -2**15 <= x < 2**15) 'digital_min' : minimum digital value (int, -2**15 <= x < 2**15) """ if edfsignal < 0 or edfsignal > self.n_channels: raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal] = channel_info self.update_header() def setSignalHeaders(self, signalHeaders): """ Sets the parameter for all signals Parameters ---------- signalHeaders : array_like containing dict with 'label' : str channel label (string, <= 16 characters, must be unique) 'dimension' : str physical dimension (e.g., mV) (string, <= 8 characters) 'sample_rate' : int sample frequency in hertz 'physical_max' : float maximum physical value 'physical_min' : float minimum physical value 'digital_max' : int maximum digital value (-2**15 <= x < 2**15) 'digital_min' : int minimum digital value (-2**15 <= x < 2**15) """ for edfsignal in np.arange(self.n_channels): self.channels[edfsignal] = signalHeaders[edfsignal] self.update_header() def setTechnician(self, technician): """ Sets the technicians name to `technician`. Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ self.technician = technician self.update_header() def setRecordingAdditional(self, recording_additional): """ Sets the additional recordinginfo Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ self.recording_additional = recording_additional self.update_header() def setPatientName(self, patient_name): """ Sets the patientname to `patient_name`. Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ self.patient_name = patient_name self.update_header() def setPatientCode(self, patient_code): """ Sets the patientcode to `patient_code`. Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ self.patient_code = patient_code self.update_header() def setPatientAdditional(self, patient_additional): """ Sets the additional patientinfo to `patient_additional`. Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. 
""" self.patient_additional = patient_additional self.update_header() def setEquipment(self, equipment): """ Sets the name of the param equipment used during the aquisition. This function is optional and can be called only after opening a file in writemode and before the first sample write action. Parameters ---------- equipment : str Describes the measurement equpipment """ self.equipment = equipment self.update_header() def setAdmincode(self, admincode): """ Sets the admincode. This function is optional and can be called only after opening a file in writemode and before the first sample write action. Parameters ---------- admincode : str admincode which is written into the header """ self.admincode = admincode self.update_header() def setGender(self, gender): """ Sets the gender. This function is optional and can be called only after opening a file in writemode and before the first sample write action. Parameters ---------- gender : int 1 is male, 0 is female """ self.gender = gender2int(gender) self.update_header() def setDatarecordDuration(self, duration): """ Sets the datarecord duration. The default value is 100000 which is 1 second. ATTENTION: the argument "duration" is expressed in units of 10 microSeconds! So, if you want to set the datarecord duration to 0.1 second, you must give the argument "duration" a value of "10000". This function is optional, normally you don't need to change the default value. The datarecord duration must be in the range 0.001 to 60 seconds. Returns 0 on success, otherwise -1. Parameters ---------- duration : integer Sets the datarecord duration in units of 10 microSeconds Notes ----- This function is NOT REQUIRED but can be called after opening a file in writemode and before the first sample write action. This function can be used when you want to use a samplerate which is not an integer. For example, if you want to use a samplerate of 0.5 Hz, set the samplefrequency to 5 Hz and the datarecord duration to 10 seconds. Do not use this function, except when absolutely necessary! """ self.duration = duration self.update_header() def set_number_of_annotation_signals(self, number_of_annotations): """ Sets the number of annotation signals. The default value is 1 This function is optional and can be called only after opening a file in writemode and before the first sample write action Normally you don't need to change the default value. Only when the number of annotations you want to write is more than the number of seconds of the duration of the recording, you can use this function to increase the storage space for annotations Minimum is 1, maximum is 64 Parameters ---------- number_of_annotations : integer Sets the number of annotation signals """ number_of_annotations = max((min((int(number_of_annotations), 64)), 1)) self.number_of_annotations = number_of_annotations self.update_header() def setStartdatetime(self, recording_start_time): """ Sets the recording start Time Parameters ---------- recording_start_time: datetime object Sets the recording start Time """ if not isinstance(recording_start_time, datetime): recording_start_time = datetime.strptime(recording_start_time,"%d %b %Y %H:%M:%S") self.recording_start_time = recording_start_time self.update_header() def setBirthdate(self, birthdate): """ Sets the birthdate. 
Parameters ---------- birthdate: date object from datetime Examples -------- >>> import pyedflib >>> from datetime import datetime, date >>> f = pyedflib.EdfWriter('test.bdf', 1, file_type=pyedflib.FILETYPE_BDFPLUS) >>> f.setBirthdate(date(1951, 8, 2)) >>> f.close() Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ if isinstance(birthdate, str): birthdate = datetime.strptime(birthdate, "%d.%m.%Y") self.birthdate = birthdate self.update_header() def setSamplefrequency(self, edfsignal, samplefrequency): """ Sets the samplefrequency of signal edfsignal. Notes ----- This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if edfsignal < 0 or edfsignal > self.n_channels: raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['sample_rate'] = samplefrequency self.update_header() def setPhysicalMaximum(self, edfsignal, physical_maximum): """ Sets the physical_maximum of signal edfsignal. Parameters ---------- edfsignal: int signal number physical_maximum: float Sets the physical maximum Notes ----- This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if edfsignal < 0 or edfsignal > self.n_channels: raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['physical_max'] = physical_maximum self.update_header() def setPhysicalMinimum(self, edfsignal, physical_minimum): """ Sets the physical_minimum of signal edfsignal. Parameters ---------- edfsignal: int signal number physical_minimum: float Sets the physical minimum Notes ----- This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['physical_min'] = physical_minimum self.update_header() def setDigitalMaximum(self, edfsignal, digital_maximum): """ Sets the maximum digital value of signal edfsignal. Usually, the value 32767 is used for EDF+ and 8388607 for BDF+. Parameters ---------- edfsignal : int signal number digital_maximum : int Sets the maximum digital value Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['digital_max'] = digital_maximum self.update_header() def setDigitalMinimum(self, edfsignal, digital_minimum): """ Sets the minimum digital value of signal edfsignal. Usually, the value -32768 is used for EDF+ and -8388608 for BDF+. Usually this will be (-(digital_maximum + 1)). Parameters ---------- edfsignal : int signal number digital_minimum : int Sets the minimum digital value Notes ----- This function is optional and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['digital_min'] = digital_minimum self.update_header() def setLabel(self, edfsignal, label): """ Sets the label (name) of signal edfsignal ("FP1", "SaO2", etc.). 
Parameters ---------- edfsignal : int signal number on which the label should be changed label : str signal label Notes ----- This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['label'] = label self.update_header() def setPhysicalDimension(self, edfsignal, physical_dimension): """ Sets the physical dimension of signal edfsignal ("uV", "BPM", "mA", "Degr.", etc.) :param edfsignal: int :param physical_dimension: str Notes ----- This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if edfsignal < 0 or edfsignal > self.n_channels: raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['dimension'] = physical_dimension self.update_header() def setTransducer(self, edfsignal, transducer): """ Sets the transducer of signal edfsignal :param edfsignal: int :param transducer: str Notes ----- This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['transducer'] = transducer self.update_header() def setPrefilter(self, edfsignal, prefilter): """ Sets the prefilter of signal edfsignal ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.) :param edfsignal: int :param prefilter: str Notes ----- This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if edfsignal < 0 or edfsignal > self.n_channels: raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['prefilter'] = prefilter self.update_header() def writePhysicalSamples(self, data): """ Writes n physical samples (uV, mA, Ohm) belonging to one signal where n is the samplefrequency of the signal. data_vec belonging to one signal. The size must be the samplefrequency of the signal. Notes ----- Writes n physical samples (uV, mA, Ohm) from data_vec belonging to one signal where n is the samplefrequency of the signal. The physical samples will be converted to digital samples using the values of physical maximum, physical minimum, digital maximum and digital minimum. The number of samples written is equal to the samplefrequency of the signal. Call this function for every signal in the file. The order is important! When there are 4 signals in the file, the order of calling this function must be: signal 0, signal 1, signal 2, signal 3, signal 0, signal 1, signal 2, etc. All parameters must be already written into the bdf/edf-file. """ return write_physical_samples(self.handle, data) def writeDigitalSamples(self, data): return write_digital_samples(self.handle, data) def writeDigitalShortSamples(self, data): return write_digital_short_samples(self.handle, data) def blockWritePhysicalSamples(self, data): """ Writes physical samples (uV, mA, Ohm) must be filled with samples from all signals where each signal has n samples which is the samplefrequency of the signal. data_vec belonging to one signal. The size must be the samplefrequency of the signal. Notes ----- buf must be filled with samples from all signals, starting with signal 0, 1, 2, etc. 
one block equals one second The physical samples will be converted to digital samples using the values of physical maximum, physical minimum, digital maximum and digital minimum The number of samples written is equal to the sum of the samplefrequencies of all signals Size of buf should be equal to or bigger than sizeof(double) multiplied by the sum of the samplefrequencies of all signals Returns 0 on success, otherwise -1 All parameters must be already written into the bdf/edf-file. """ return blockwrite_physical_samples(self.handle, data) def blockWriteDigitalSamples(self, data): return blockwrite_digital_samples(self.handle, data) def blockWriteDigitalShortSamples(self, data): return blockwrite_digital_short_samples(self.handle, data) def writeSamples(self, data_list, digital = False): """ Writes physical samples (uV, mA, Ohm) from data belonging to all signals The physical samples will be converted to digital samples using the values of physical maximum, physical minimum, digital maximum and digital minimum. if the samplefrequency of all signals are equal, then the data could be saved into a matrix with the size (N,signals) If the samplefrequency is different, then sample_freq is a vector containing all the different samplefrequencys. The data is saved as list. Each list entry contains a vector with the data of one signal. If digital is True, digital signals (as directly from the ADC) will be expected. (e.g. int16 from 0 to 2048) All parameters must be already written into the bdf/edf-file. """ if (len(data_list) != len(self.channels)): raise WrongInputSize(len(data_list)) if digital: if any([not np.issubdtype(a.dtype, np.integer) for a in data_list]): raise TypeError('Digital = True requires all signals in int') # Check that all channels have different physical_minimum and physical_maximum for chan in self.channels: assert chan['physical_min'] != chan['physical_max'], \ 'In chan {} physical_min {} should be different from '\ 'physical_max {}'.format(chan['label'], chan['physical_min'], chan['physical_max']) ind = [] notAtEnd = True for i in np.arange(len(data_list)): ind.append(0) sampleLength = 0 sampleRates = np.zeros(len(data_list), dtype=np.int32) for i in np.arange(len(data_list)): sampleRates[i] = self.channels[i]['sample_rate'] if (np.size(data_list[i]) < ind[i] + self.channels[i]['sample_rate']): notAtEnd = False sampleLength += self.channels[i]['sample_rate'] dataOfOneSecond = np.array([], dtype=np.int32 if digital else None) while notAtEnd: # dataOfOneSecondInd = 0 del dataOfOneSecond dataOfOneSecond = np.array([], dtype=np.int32 if digital else None) for i in np.arange(len(data_list)): # dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])] dataOfOneSecond = np.append(dataOfOneSecond,data_list[i].ravel()[int(ind[i]):int(ind[i]+sampleRates[i])]) # self.writePhysicalSamples(data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])]) ind[i] += sampleRates[i] # dataOfOneSecondInd += sampleRates[i] if digital: success = self.blockWriteDigitalSamples(dataOfOneSecond) else: success = self.blockWritePhysicalSamples(dataOfOneSecond) if success<0: raise IOError('Unknown error while calling blockWriteSamples') for i in np.arange(len(data_list)): if (np.size(data_list[i]) < ind[i] + sampleRates[i]): notAtEnd = False # dataOfOneSecondInd = 0 for i in np.arange(len(data_list)): lastSamples = np.zeros(sampleRates[i], dtype=np.int32 if digital else None) 
lastSampleInd = int(np.max(data_list[i].shape) - ind[i]) lastSampleInd = int(np.min((lastSampleInd,sampleRates[i]))) if lastSampleInd > 0: lastSamples[:lastSampleInd] = data_list[i].ravel()[-lastSampleInd:] # dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = lastSamples # dataOfOneSecondInd += self.channels[i]['sample_rate'] if digital: success = self.writeDigitalSamples(lastSamples) else: success = self.writePhysicalSamples(lastSamples) if success<0: raise IOError('Unknown error while calling writeSamples') # self.blockWritePhysicalSamples(dataOfOneSecond) def writeAnnotation(self, onset_in_seconds, duration_in_seconds, description, str_format='utf-8'): """ Writes an annotation/event to the file """ if self.file_type in [FILETYPE_EDF, FILETYPE_BDF]: raise TypeError('Trying to write annotation to EDF/BDF, must use EDF+/BDF+') if str_format == 'utf-8': if duration_in_seconds >= 0: return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), du(description)) else: return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, du(description)) else: if duration_in_seconds >= 0: return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1')) else: return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, u(description).encode('latin1')) def close(self): """ Closes the file. """ close_file(self.handle) self.handle = -1
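

# --- Illustrative usage sketch (not part of the library) ---------------------
# A minimal, hedged example of how the channel_info dict documented above is
# filled in and how physical samples and an annotation are written. The file
# name, signal values and header fields are arbitrary stand-ins.
def _example_usage():
    signal_header = {
        'label': 'ch1',
        'dimension': 'uV',
        'sample_rate': 100,
        'physical_max': 100.0,
        'physical_min': -100.0,
        'digital_max': 32767,
        'digital_min': -32768,
        'prefilter': '',
        'transducer': '',
    }
    writer = EdfWriter('example.edf', 1, file_type=FILETYPE_EDFPLUS)
    writer.setSignalHeader(0, signal_header)

    # Two seconds of a 1 Hz sine wave, one channel, 100 samples per second.
    data = 100.0 * np.sin(2 * np.pi * np.arange(200) / 100.0)
    writer.writeSamples([data])

    # Mark a one-second event starting at t = 0 s.
    writer.writeAnnotation(0.0, 1.0, 'example event')
    writer.close()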
py
1a49e93d5e4d28eb44fe918679233d4a2effd0af
import pytest from galaxy.config import BaseAppConfiguration from galaxy.config.schema import AppSchema from galaxy.exceptions import ConfigurationError # When a config property 'foo' has an attribute 'path_resolves_to', that attribute is a reference to # another property 'bar'. Together, these two properties form a graph where 'foo' and 'bar are # vertices and the reference from 'foo' to 'bar' is a directed edge. # # A schema may have any number of such implicit graphs, each having one or more edges. All together, # they should form a DAG (directed acyclic graph). # # These tests ensure that the graph is loaded correctly for a variety of valid configurations, # whereas an invalid configuration raises an error. def get_schema(app_mapping): return {'mapping': {'_': {'mapping': app_mapping}}} def test_basecase(monkeypatch): # Check that a valid graph is loaded correctly (this graph has 2 components) mock_schema = { 'component1_path0': { 'type': 'str', 'default': 'value0', }, 'component1_path1': { 'type': 'str', 'default': 'value1', 'path_resolves_to': 'component1_path0', }, 'component1_path2': { 'type': 'str', 'default': 'value2', 'path_resolves_to': 'component1_path1', }, 'component2_path0': { 'type': 'str', 'default': 'value3', }, 'component2_path1': { 'type': 'str', 'default': 'value4', 'path_resolves_to': 'component2_path0', }, } monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema)) monkeypatch.setattr(BaseAppConfiguration, '_load_schema', lambda a: AppSchema(None, '_')) config = BaseAppConfiguration() assert config.component1_path0 == 'value0' assert config.component1_path1 == 'value0/value1' assert config.component1_path2 == 'value0/value1/value2' assert config.component2_path0 == 'value3' assert config.component2_path1 == 'value3/value4' def test_resolves_to_invalid_property(monkeypatch): # 'path_resolves_to' should point to an existing property in the schema mock_schema = { 'path0': { 'type': 'str', 'default': 'value0', }, 'path1': { 'type': 'str', 'default': 'value1', 'path_resolves_to': 'invalid', # invalid }, } monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema)) with pytest.raises(ConfigurationError): AppSchema(None, '_').validate_path_resolution_graph() def test_path_resolution_cycle(monkeypatch): # Must be a DAG, but this one has a cycle mock_schema = { 'path0': { 'type': 'str', 'default': 'value0', 'path_resolves_to': 'path2', }, 'path1': { 'type': 'str', 'default': 'value1', 'path_resolves_to': 'path0', }, 'path2': { 'type': 'str', 'default': 'value2', 'path_resolves_to': 'path1', }, } monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema)) with pytest.raises(ConfigurationError): AppSchema(None, '_').validate_path_resolution_graph() def test_path_invalid_type(monkeypatch): # Paths should be strings mock_schema = { 'path0': { 'type': 'str', 'default': 'value0', }, 'path1': { 'type': 'float', # invalid 'default': 'value1', 'path_resolves_to': 'path0', }, } monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema)) with pytest.raises(ConfigurationError): AppSchema(None, '_').validate_path_resolution_graph() def test_resolves_to_invalid_type(monkeypatch): # Paths should be strings mock_schema = { 'path0': { 'type': 'int', # invalid 'default': 'value0', }, 'path1': { 'type': 'str', 'default': 'value1', 'path_resolves_to': 'path0', }, } monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema)) with pytest.raises(ConfigurationError): 
        AppSchema(None, '_').validate_path_resolution_graph()


def test_resolves_with_empty_component(monkeypatch):
    # A path can be None (root path is never None; may be assigned elsewhere)
    mock_schema = {
        'path0': {
            'type': 'str',
            'default': 'value0',
        },
        'path1': {
            'type': 'str',
            'path_resolves_to': 'path0',
        },
        'path2': {
            'type': 'str',
            'default': 'value2',
            'path_resolves_to': 'path1',
        },
    }
    monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
    monkeypatch.setattr(BaseAppConfiguration, '_load_schema', lambda a: AppSchema(None, '_'))

    config = BaseAppConfiguration()

    assert config.path0 == 'value0'
    assert config.path1 == 'value0'
    assert config.path2 == 'value0/value2'