ext: stringclasses (9 values)
sha: stringlengths (40 to 40)
content: stringlengths (3 to 1.04M)
py
1a3aaf8df6a1e868abd2754ed4e66dd251ecc6a7
from base64 import b64encode
from base64 import b64decode
from threading import local

import boto3
import six

__all__ = [
    '_as_bytes',
    'b64_str',
    'from_b64_str',
    '_get_client',
    '_prefix_alias',
]

thread_local = local()
thread_local.sessions = {}


def _as_bytes(value):
    if isinstance(value, six.string_types):
        value = value.encode('utf-8')
    return value


def b64_str(value: bytes):
    return b64encode(value).decode('utf-8')


def from_b64_str(value: str):
    value = value.encode('utf-8')
    return b64decode(value)


def _get_client(region: str = None, profile: str = None):
    key = f'{region}-{profile}'
    client = thread_local.sessions.get(key)
    if not client:
        session = boto3.Session(region_name=region, profile_name=profile)
        client = session.client('kms')
        thread_local.sessions[key] = client
    return client


def _prefix_alias(alias: str):
    if not alias.startswith('alias/'):
        alias = f'alias/{alias}'
    return alias
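A brief usage sketch of these helpers; it assumes configured AWS credentials and uses a hypothetical KMS alias ('my-key'), so treat it as illustrative rather than part of the module:

# Minimal sketch, assuming AWS credentials are configured and the alias exists.
# 'my-key' is a hypothetical alias used only for illustration.
payload = _as_bytes('secret value')
encoded = b64_str(payload)               # bytes -> base64 text
assert from_b64_str(encoded) == payload  # round-trips back to bytes

client = _get_client(region='us-east-1', profile=None)  # cached per (region, profile)
key_alias = _prefix_alias('my-key')                      # -> 'alias/my-key'
ciphertext = client.encrypt(KeyId=key_alias, Plaintext=payload)['CiphertextBlob']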
py
1a3aaffa55167ef754d8f35549b385a02bc3a1e5
""" Generates a powershell script to install Windows agent - dcos_install.ps1 """ import os import os.path import gen.build_deploy.util as util import gen.template import gen.util import pkgpanda import pkgpanda.util def generate(gen_out, output_dir): print("Generating Powershell configuration files for DC/OS") make_powershell(gen_out, output_dir) def make_powershell(gen_out, output_dir): """Build powershell deployment script and store this at Bootstrap serve""" output_dir = output_dir + '/windows/' pkgpanda.util.make_directory(output_dir) bootstrap_url = gen_out.arguments['bootstrap_url'] if gen_out.arguments['master_discovery'] == 'static': master_list = gen_out.arguments['master_list'] elif gen_out.arguments['master_discovery'] == 'master_http_loadbalancer': master_list = gen_out.arguments['exhibitor_address'] + ':2181' else: master_list = 'zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,zk-5.zk:2181' powershell_template_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'powershell/dcos_install.ps1.in') with open(powershell_template_path, 'r') as f: powershell_template = f.read() powershell_script = gen.template.parse_str(powershell_template).render({ 'dcos_image_commit': util.dcos_image_commit, 'generation_date': util.template_generation_date, 'bootstrap_url': bootstrap_url, 'master_list': master_list, }) # Output the dcos install ps1 script install_script_filename = 'dcos_install.ps1' pkgpanda.util.write_string(install_script_filename, powershell_script) pkgpanda.util.write_string(output_dir + install_script_filename, powershell_script) f.close()
py
1a3ab05147393b86ec60b3d02d1dc0f61ef8ca35
#! /usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import math
import unittest

import torch
from botorch import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.optim.fit import (
    OptimizationIteration,
    fit_gpytorch_scipy,
    fit_gpytorch_torch,
)
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood


NOISE = [0.127, -0.113, -0.345, -0.034, -0.069, -0.272, 0.013, 0.056, 0.087, -0.081]


class TestFitGPyTorchModel(unittest.TestCase):
    def _getModel(self, double=False, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        dtype = torch.double if double else torch.float
        train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
        noise = torch.tensor(NOISE, device=device, dtype=dtype)
        train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
        model = SingleTaskGP(train_x, train_y)
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        return mll.to(device=device, dtype=dtype)

    def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
        options = {"disp": False, "maxiter": 5}
        for double in (False, True):
            mll = self._getModel(double=double, cuda=cuda)
            mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
            model = mll.model
            # Make sure all of the parameters changed
            self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test overriding the default bounds with user supplied bounds
            mll = self._getModel(double=double, cuda=cuda)
            mll = fit_gpytorch_model(
                mll,
                optimizer=optimizer,
                options=options,
                bounds={"likelihood.noise_covar.raw_noise": (1e-1, None)},
            )
            model = mll.model
            self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(), 1e-1)
            self.assertLess(model.mean_module.constant.abs().item(), 0.1)
            self.assertGreater(
                model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
            )
            self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)

            # test tracking iterations
            mll = self._getModel(double=double, cuda=cuda)
            if optimizer is fit_gpytorch_torch:
                options["disp"] = True
            mll, iterations = optimizer(mll, options=options, track_iterations=True)
            self.assertEqual(len(iterations), options["maxiter"])
            self.assertIsInstance(iterations[0], OptimizationIteration)

            # test extra param that does not affect loss
            options["disp"] = False
            mll = self._getModel(double=double, cuda=cuda)
            mll.register_parameter(
                "dummy_param",
                torch.nn.Parameter(
                    torch.tensor(
                        [5.0],
                        dtype=torch.double if double else torch.float,
                        device=torch.device("cuda" if cuda else "cpu"),
                    )
                ),
            )
            mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
            self.assertTrue(mll.dummy_param.grad is None)

    def test_fit_gpytorch_model_scipy_cuda(self):
        if torch.cuda.is_available():
            self.test_fit_gpytorch_model(cuda=True)

    def test_fit_gpytorch_model_torch(self, cuda=False):
        self.test_fit_gpytorch_model(cuda=cuda, optimizer=fit_gpytorch_torch)

    def test_fit_gpytorch_model_torch_cuda(self):
        if torch.cuda.is_available():
            self.test_fit_gpytorch_model_torch(cuda=True)
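For context, a minimal stand-alone sketch of the fitting flow this test exercises (synthetic data, same calls as the test above; not part of the test suite):

# Minimal sketch of the fitting flow exercised by the test above.
# Uses only the calls that appear in the test; the training data is synthetic.
import math
import torch
from botorch import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood

train_x = torch.linspace(0, 1, 10).unsqueeze(-1)
train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + 0.1 * torch.randn(10)
model = SingleTaskGP(train_x, train_y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll = fit_gpytorch_model(mll, options={"disp": False, "maxiter": 50})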
py
1a3ab121603bbca637a3208b65253d220de16d83
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited, Google and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re
import os
import logging

from time import sleep

from target_script import TargetScript
from android import Screen, System
from android.workload import Workload


class CameraStartup(Workload):
    """
    Android CameraStartup workload

    This workload is intended to be used to collect traces of the camera
    starting up to debug issues related to camera startup. For this reason,
    the camera app is started after sleeping for 3 seconds after tracingStart.
    For this same reason, energy cannot be collected since that disconnects USB.
    """

    # Package required by this workload
    package = 'com.google.android.GoogleCamera'
    action = 'android.intent.action.MAIN'

    def __init__(self, test_env):
        super(CameraStartup, self).__init__(test_env)
        self._log = logging.getLogger('CameraStartup')
        self._log.debug('Workload created')

    def run(self, out_dir, duration_s=10, collect='systrace'):
        """
        Run a camera startup workload

        :param out_dir: Path to experiment directory where to store results.
        :type out_dir: str

        :param duration_s: Duration of test
        :type duration_s: int

        :param collect: Specifies what to collect. Possible values:
            - 'energy'
            - 'systrace'
            - 'ftrace'
            - any combination of the above
        :type collect: list(str)
        """
        if 'energy' in collect:
            raise RuntimeError('CameraStartup cannot do energy collection as app is started after tracingStart')

        self._log.info("Running CameraStartup for {}s and collecting {}".format(duration_s, collect))

        # Keep track of mandatory parameters
        self.out_dir = out_dir
        self.collect = collect

        # Unlock device screen (assume no password required)
        Screen.unlock(self._target)

        # Set airplane mode
        System.set_airplane_mode(self._target, on=True)

        # Set min brightness
        Screen.set_brightness(self._target, auto=False, percent=0)

        # Force screen in PORTRAIT mode
        Screen.set_orientation(self._target, portrait=True)

        sleep(1)

        self.tracingStart()

        # Wait for a few seconds so that the start of the trace and the start
        # of the camera app can be clearly distinguished
        sleep(3)

        # Use the monkey tool to start CameraStartup
        System.monkey(self._target, self.package)

        sleep(duration_s)

        self.tracingStop()

        # Close the app without clearing the local data to
        # avoid the dialog to select the account at next start
        System.force_stop(self._target, self.package, clear=False)

        # Go back to home screen
        System.home(self._target)

        # Set brightness back to auto
        Screen.set_brightness(self._target, auto=True)

        # Switch back to screen auto rotation
        Screen.set_orientation(self._target, auto=True)

        # Switch off airplane mode
        System.set_airplane_mode(self._target, on=False)
py
1a3ab13d6cfc9c6b2d89ae118faeb85b8f15a98e
#!/usr/bin/env python
# -*- coding: utf-8 -*-

###############################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2018 Prof. William H. Green ([email protected]),
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################

import os
import numpy as np
import logging
import shutil
from copy import deepcopy

import rmgpy
from rmgpy.rmg.main import RMG
from rmgpy.rmg.model import CoreEdgeReactionModel
from rmgpy.data.rmg import getDB
from rmgpy.exceptions import InputError

################################################################################


class ExplorerJob(object):
    def __init__(self, source, pdepjob, explore_tol, energy_tol=np.inf, flux_tol=0.0,
                 bathGas=None, maximumRadicalElectrons=np.inf):
        self.source = source
        self.explore_tol = explore_tol
        self.energy_tol = energy_tol
        self.flux_tol = flux_tol
        self.maximumRadicalElectrons = maximumRadicalElectrons

        self.pdepjob = pdepjob

        if not hasattr(self.pdepjob, 'outputFile'):
            self.pdepjob.outputFile = None

        if bathGas:
            self.bathGas = bathGas
        elif self.pdepjob.network and self.pdepjob.network.bathGas:
            self.bathGas = self.pdepjob.network.bathGas
        else:
            raise InputError('bathGas not specified in explorer block')

    def copy(self):
        """
        Return a copy of the explorer job.
        """
        return ExplorerJob(
            source=deepcopy(self.source),
            pdepjob=self.pdepjob,
            explore_tol=self.explore_tol,
            energy_tol=self.energy_tol,
            flux_tol=self.flux_tol,
        )

    def execute(self, outputFile, plot, format='pdf', print_summary=True,
                speciesList=None, thermoLibrary=None, kineticsLibrary=None):

        logging.info('Exploring network...')

        rmg = RMG()

        rmg.speciesConstraints = {'allowed': ['input species', 'seed mechanisms', 'reaction libraries'],
                                  'maximumRadicalElectrons': self.maximumRadicalElectrons,
                                  'explicitlyAllowedMolecules': []}

        rmgpy.rmg.input.rmg = rmg

        reaction_model = CoreEdgeReactionModel()

        reaction_model.pressureDependence = self.pdepjob
        reaction_model.pressureDependence.rmgmode = True

        if outputFile:
            reaction_model.pressureDependence.outputFile = os.path.dirname(outputFile)

        kineticsDatabase = getDB('kinetics')
        thermoDatabase = getDB('thermo')

        thermoDatabase.libraries['thermojobs'] = thermoLibrary
        thermoDatabase.libraryOrder.insert(0, 'thermojobs')

        kineticsDatabase.libraries['kineticsjobs'] = kineticsLibrary
        kineticsDatabase.libraryOrder.insert(0, ('kineticsjobs', 'Reaction Library'))

        jobRxns = [rxn for rxn in reaction_model.core.reactions]

        self.jobRxns = jobRxns

        if outputFile is not None:
            if not os.path.exists(os.path.join(reaction_model.pressureDependence.outputFile, 'pdep')):
                os.mkdir(os.path.join(reaction_model.pressureDependence.outputFile, 'pdep'))
            else:
                shutil.rmtree(os.path.join(reaction_model.pressureDependence.outputFile, 'pdep'))
                os.mkdir(os.path.join(reaction_model.pressureDependence.outputFile, 'pdep'))

        # get the molecular formula for the network
        mmol = None
        for spc in self.source:
            if mmol:
                mmol.merge(spc.molecule[0])
            else:
                mmol = spc.molecule[0]

        form = mmol.getFormula()

        for spec in self.bathGas.keys() + self.source:
            nspec, isNew = reaction_model.makeNewSpecies(spec, reactive=False)
            flags = np.array([s.molecule[0].getFormula() == form for s in reaction_model.core.species])
            reaction_model.enlarge(nspec, reactEdge=False, unimolecularReact=flags,
                                   bimolecularReact=np.zeros((len(reaction_model.core.species),
                                                              len(reaction_model.core.species))))

        reaction_model.addSeedMechanismToCore('kineticsjobs')

        for lib in kineticsDatabase.libraryOrder:
            if lib[0] != 'kineticsjobs':
                reaction_model.addReactionLibraryToEdge(lib[0])

        for spc in reaction_model.core.species:
            for i, item in enumerate(self.source):
                if spc.isIsomorphic(item):
                    self.source[i] = spc

        # react initial species
        flags = np.array([s.molecule[0].getFormula() == form for s in reaction_model.core.species])
        reaction_model.enlarge(reactEdge=True, unimolecularReact=flags,
                               bimolecularReact=np.zeros((len(reaction_model.core.species),
                                                          len(reaction_model.core.species))))

        # find the network we're interested in
        for nwk in reaction_model.networkList:
            if set(nwk.source) == set(self.source):
                self.source = nwk.source
                network = nwk
                break
        else:
            raise ValueError('Did not generate a network with the requested source. This usually means no unimolecular'
                             ' reactions were generated for the source. Note that library reactions that are not'
                             ' properly flagged as elementary_high_p can replace RMG generated reactions that would'
                             ' otherwise be part of networks.')

        network.bathGas = self.bathGas

        self.network = network

        # determine T and P combinations
        if self.pdepjob.Tlist:
            Tlist = self.pdepjob.Tlist.value_si
        else:
            Tlist = np.linspace(self.pdepjob.Tmin.value_si, self.pdepjob.Tmax.value_si, self.pdepjob.Tcount)

        if self.pdepjob.Plist:
            Plist = self.pdepjob.Plist.value_si
        else:
            Plist = np.linspace(self.pdepjob.Pmin.value_si, self.pdepjob.Pmax.value_si, self.pdepjob.Pcount)

        # generate the network
        forbiddenStructures = getDB('forbidden')
        incomplete = True

        while incomplete:
            incomplete = False
            for T in Tlist:
                for P in Plist:
                    if network.getLeakCoefficient(T=T, P=P) > self.explore_tol:
                        incomplete = True
                        spc = network.getMaximumLeakSpecies(T=T, P=P)
                        if forbiddenStructures.isMoleculeForbidden(spc.molecule[0]):
                            reaction_model.removeSpeciesFromEdge(reaction_model.reactionSystems, spc)
                            reaction_model.removeEmptyPdepNetworks()
                            logging.error(spc.label)
                        else:
                            logging.info('adding new isomer {0} to network'.format(spc))
                            flags = np.array([s.molecule[0].getFormula() == form
                                              for s in reaction_model.core.species])
                            reaction_model.enlarge((network, spc), reactEdge=False, unimolecularReact=flags,
                                                   bimolecularReact=np.zeros((len(reaction_model.core.species),
                                                                              len(reaction_model.core.species))))

                            flags = np.array([s.molecule[0].getFormula() == form
                                              for s in reaction_model.core.species])
                            reaction_model.enlarge(reactEdge=True, unimolecularReact=flags,
                                                   bimolecularReact=np.zeros((len(reaction_model.core.species),
                                                                              len(reaction_model.core.species))))

        rmRxns = []
        for rxn in network.pathReactions:  # remove reactions with forbidden species
            for r in rxn.reactants + rxn.products:
                if forbiddenStructures.isMoleculeForbidden(r.molecule[0]):
                    rmRxns.append(rxn)

        for rxn in rmRxns:
            logging.info('Removing forbidden reaction: {0}'.format(rxn))
            network.pathReactions.remove(rxn)

        # clean up output files
        if outputFile is not None:
            path = os.path.join(reaction_model.pressureDependence.outputFile, 'pdep')
            for name in os.listdir(path):
                if name.endswith('.py') and '_' in name:
                    if name.split('_')[-1].split('.')[0] != str(len(network.isomers)):
                        os.remove(os.path.join(path, name))
                    else:
                        os.rename(os.path.join(path, name), os.path.join(path, 'network_full.py'))

        warns = []

        for rxn in jobRxns:
            if rxn not in network.pathReactions:
                warns.append('Reaction {0} in the input file was not explored during network expansion and was not'
                             ' included in the full network. This is likely because your explore_tol value is'
                             ' too high.'.format(rxn))

        # reduction process
        if self.energy_tol != np.inf or self.flux_tol != 0.0:
            rxnSet = None

            for T in Tlist:
                if self.energy_tol != np.inf:
                    rxns = network.get_energy_filtered_reactions(T, self.energy_tol)
                    if rxnSet is not None:
                        rxnSet &= set(rxns)
                    else:
                        rxnSet = set(rxns)

                for P in Plist:
                    if self.flux_tol != 0.0:
                        rxns = network.get_rate_filtered_reactions(T, P, self.flux_tol)
                        if rxnSet is not None:
                            rxnSet &= set(rxns)
                        else:
                            rxnSet = set(rxns)

            logging.info('removing reactions during reduction:')
            for rxn in rxnSet:
                logging.info(rxn)

            network.remove_reactions(reaction_model, list(rxnSet))

            for rxn in jobRxns:
                if rxn not in network.pathReactions:
                    warns.append('Reaction {0} in the input file was not included in the reduced model.'.format(rxn))

        self.network = network
        self.pdepjob.network = network

        self.pdepjob.execute(outputFile, plot, format='pdf', print_summary=True)

        if warns != []:
            logging.info('\nOUTPUT WARNINGS:\n')
            for w in warns:
                logging.warning(w)
py
1a3ab16e00901b636cbeb9e3f494d5ad3e1642dc
import psycopg2
import psycopg2.extras

from ..sql import SqlMinqlClient


class PostgresqlMinqlClient(SqlMinqlClient):

    def __init__(self, address, name, user, password, *args, **kwargs):
        url, port = address.split(':')
        params = "dbname='%s' user='%s' password='%s' host='%s' port='%s'" % (
            name, user, password, url, port)
        self.connection = psycopg2.connect(params)
        self.print_values_query = False
        super(PostgresqlMinqlClient, self).__init__(*args, **kwargs)

    # TODO find out how to do the same in hyperdex
    # and add it to the interface?
    def get_tables(self):
        cur = self.connection.cursor()
        cur.execute("""SELECT datname from pg_database""")
        rows = cur.fetchall()
        tables = []
        for row in rows:
            tables.append(row[0])
        return tables

    def create_table(self, table_name, schema):
        print('Creating PostgreSQL table %s' % table_name)
        attrs = []
        for key, value in schema.iteritems():
            attr = '"%s" ' % key
            if value['type'] == 'string':
                attr += 'VARCHAR(500)'
            elif value['type'] == 'float':
                attr += 'REAL'
            elif value['type'] == 'int':
                attr += 'INT'
            elif value['type'] == 'text':
                attr += 'TEXT'
            else:
                raise NotImplementedError
            if value['required']:
                attr += ' NOT NULL'
            attrs.append(attr)

        query = '''
            CREATE TABLE "%s" (
                id VARCHAR(100) PRIMARY KEY NOT NULL''' % table_name
        if attrs:
            query += ', \n' + ', \n'.join(attrs)
        query += ');'

        cur = self.connection.cursor()
        print(query)
        cur.execute(query)
        self.connection.commit()

        for key, value in schema.iteritems():
            if 'index' in value and value['index']:
                query = 'CREATE INDEX "%s_%s_index" ON "%s" ("%s");' % (
                    table_name, key, table_name, key)
                print(query)
                cur = self.connection.cursor()
                cur.execute(query)
                self.connection.commit()

    # TODO postgres doesn't drop tables
    def _drop_table(self, table_name):
        print('postgres drop table', table_name)
        query = 'DROP TABLE IF EXISTS "%s"' % table_name
        print(query)
        cur = self.connection.cursor()
        self.connection.set_isolation_level(0)
        # cur.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (
        #     table_name, table_name))
        cur.execute(query)
        self.connection.commit()

    def get_criteria_string(self, criteria):
        if criteria:
            crit = []
            for attr, value in criteria.iteritems():
                if type(value) is dict:
                    # TODO replace 'ge' with '>=' everywhere
                    if 'ge' in value:
                        criterion = '%s >= %s' % (attr, str(value['ge']))
                    if 'le' in value:
                        criterion = '%s <= %s' % (attr, str(value['le']))
                    if 'gt' in value:
                        criterion = '%s > %s' % (attr, str(value['gt']))
                    if 'lt' in value:
                        criterion = '%s < %s' % (attr, str(value['lt']))
                elif isinstance(value, basestring):
                    criterion = "%s = '%s'" % (attr, value)
                else:
                    criterion = '%s = %s' % (attr, str(value))
                crit.append(criterion)
            return ' where ' + ' and '.join(crit)
        else:
            return ''

    def search(self, table_name, criteria={}):
        cur = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        query = 'SELECT * from "%s"' % table_name
        query += self.get_criteria_string(criteria)
        print(query)
        cur.execute(query)
        return cur.fetchall()

    def delete(self, table_name, criteria):
        cur = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        query = 'DELETE FROM "%s"' % table_name
        query += self.get_criteria_string(criteria)
        print(query)
        cur.execute(query)

    def update(self, table_name, row):
        assert 'id' in row and row['id'], 'The row needs an id field.'
        cur = self.connection.cursor()
        updates = []
        for key, value in row.iteritems():
            if key != 'id':
                if isinstance(value, basestring):
                    val = "'%s'" % value
                else:
                    val = str(value)
                updates.append('%s = %s' % (key, val))
        prequery = 'UPDATE "%s"' % table_name
        query = '%s SET %s' % (prequery, ', '.join(updates))
        postquery = " where id = '%s'" % row['id']
        query += postquery
        if self.print_values_query:
            print(query)
        else:
            print(prequery, postquery)
        cur.execute(query)
        self.connection.commit()
        return row

    def insert(self, table_name, row):
        cur = self.connection.cursor()
        values = []
        for value in row.values():
            if isinstance(value, basestring):
                values.append("'%s'" % value)
            else:
                values.append(str(value))
        query = 'INSERT INTO "%s" (%s)' % (
            table_name,
            ', '.join(row.keys()),
        )
        print(query)
        query = '%s VALUES (%s)' % (query, ', '.join(values))
        if self.print_values_query:
            print(query)
        cur.execute(query)
        self.connection.commit()
        return row

    def _get(self, table_name, id):
        cur = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        query = 'SELECT * from "%s"' % table_name
        query += " where id = '%s'" % id
        print(query)
        cur.execute(query)
        return cur.fetchone()
py
1a3ab1f857bc46176f4183844196fe178b95d036
import random

from models import model


def create_room(room_type, room_name, dojo):
    """
    input : room_type -> string represent type of room_type
            room_name -> string represent name of room_name
    output : returns -> return Room with name -> room_name
             Raises -> TypeError if room_name exists
                       'Invalid name' if room_name exists
    """
    # remove excess white characters
    room_name_stripped = room_name.strip()
    room_type_stripped = room_type.strip()

    if len(room_type_stripped) == 0:
        raise TypeError
    room_type_cleaned = room_type_stripped

    if len(room_name_stripped) == 0 or not room_name_stripped.isalnum():
        return 'Invalid name'
    room_name_cleaned = room_name_stripped

    # map room_type to respective data type
    datatype = {'office': model.Office, 'livingspace': model.LivingSpace}
    if not room_type_cleaned.lower() in datatype:
        raise TypeError
    if room_name_cleaned in dojo.takken_names:
        return 'duplicates'
    return datatype[room_type_cleaned.lower()](room_name_cleaned)


def add_person(names, person_type, wants_livingspace='N'):
    """
    input: firstname lastname Fellow/Staff [Y]
    """
    # validate fields data types
    if not isinstance(names, tuple) or not isinstance(person_type, str) or\
            not isinstance(wants_livingspace, str):
        raise TypeError

    # validate person_type
    person_type = person_type.lower().strip()
    if person_type not in ["fellow", "staff"]:
        raise TypeError

    # validate name
    name1 = names[0].strip().lower()
    name2 = names[1].strip().lower()
    if not name1.isalnum() or not name2.isalnum():
        return "Invalid name"
    name = name1 + " " + name2

    # validate wants_livingspace
    wants_livingspace = wants_livingspace.strip().lower()
    if wants_livingspace not in 'yn' and person_type == "fellow":
        return "Invalid choice"
    choice = True if wants_livingspace == 'y' else False

    if person_type == 'staff':
        new_person = model.Staff(name)
        new_person.office = False
    else:
        new_person = model.Fellow(name, choice)
        new_person.livingspace = False
        new_person.office = False
        new_person.wants_living = False
        if choice:
            new_person.wants_living = True
    return new_person


def allocate_room(new_person, dojo):
    """
    allocates a room to new_person
    Returns a dictionary of status messages about success of adding to rooms
    """
    status_messages = {'office': None, 'livingspace': None}
    if new_person == 'Invalid name':
        status_messages['status'] = 'Invalid name'
        return status_messages
    elif new_person == "Invalid choice":
        status_messages['status'] = 'Invalid choice'
        return status_messages
    elif isinstance(new_person, model.Fellow):
        if new_person.wants_living:
            status_messages['livingspace'] = allocate_livingspace(new_person, dojo=dojo)
        dojo.add_fellow(new_person)
        status_messages['person_type'] = 'fellow'
    else:
        dojo.add_staff(new_person)
        status_messages['person_type'] = 'staff'
    status_messages['office'] = allocate_office(new_person, dojo=dojo)
    return status_messages


def allocate_office(new_person, dojo, name_office=None):
    '''
    allocates office to new person_type
    Returns name of office if added else None
    '''
    if not name_office:
        name_office = choose_office_random(dojo)
    office = dojo.get_office(name_office)
    if name_office != "NoRoomException" and not office.is_full():
        dojo.add_person_office(name_office, new_person)
        new_person.office = True
        name_office = office.name
    else:
        name_office = None
    return name_office


def allocate_livingspace(new_person, dojo, name_livingspace=None):
    '''
    allocates livingspace to new_person
    Returns name of living space if added else None
    '''
    if not name_livingspace:
        name_livingspace = choose_living_space_random(dojo)
    livingspace = dojo.get_livingspace(name_livingspace)
    if name_livingspace == "NoRoomException" or livingspace.is_full():
        name_livingspace = None
    elif new_person.wants_living:
        dojo.add_fellow_living(name_livingspace, new_person)
        new_person.livingspace = True
        name_livingspace = livingspace.name
    else:
        name_livingspace = None
    return name_livingspace


def choose_office_random(dojo):
    """ choose an office at random """
    number_of_offices = len(dojo.office)
    if number_of_offices > 0:
        index = random.randrange(number_of_offices)
    else:
        return "NoRoomException"
    list_offices = list(dojo.office)
    return list_offices[index].name


def choose_living_space_random(dojo):
    """ choose a livingspace at random """
    number_of_livingspace = len(dojo.livingspace)
    if number_of_livingspace > 0:
        index = random.randrange(number_of_livingspace)
    else:
        return "NoRoomException"
    list_livingspace = list(dojo.livingspace)
    return list_livingspace[index].name


class NoRoomException(Exception):
    pass


def save_data_text(file_name, data, mode='wt'):
    if file_name[len(file_name) - 4:] != '.txt':
        file_name = file_name + '.txt'
    file_out = open(file_name, mode)
    for name in data:
        print(name, file=file_out)
    file_out.close()


def load_data_text(file_name):
    data = []
    raw_data = open(file_name, 'rt')
    while True:
        line = raw_data.readline()
        if not line:
            break
        data.append(line.split())
    return data


def deallocate_person(room_type, person, office=None, livingspace=None):
    deallocation = None
    if room_type == 'O' and office:
        deallocation = deallocate_office(person, office)
    elif room_type == 'L' and livingspace:
        deallocation = deallocate_livingspace(person, livingspace)
    elif room_type == 'L' and isinstance(person, model.Staff):
        deallocation = 'Invalid Operation'
    return deallocation


def get_roomname_type(room_name, dojo):
    status_messages = {}
    room_name = room_name.strip().lower()
    if room_name not in dojo.takken_names:
        status_messages['status'] = "Room not found"
    else:
        office = dojo.get_office(room_name.strip().lower())
        livingspace = dojo.get_livingspace(room_name.strip().lower())
        # we can only reallocate one room at a time: office or livingspace
        status_messages['in'] = (office, 'O') if office else (livingspace, 'L')
        status_messages['status'] = 'ok'
    return status_messages


def deallocate_livingspace(person, room):
    if isinstance(person, model.Staff):
        return 'Invalid Operation'
    if person.is_allocated_living() and person.wants_living:
        room.remove_occupant(person)
        person.livingspace = False
    elif not person.wants_living:
        return 'Invalid Operation'
    return 'Done'


def deallocate_office(person, room):
    room.remove_occupant(person)
    person.office = False
    return 'Done'
py
1a3ab2745ef6e75e457837582e84c7b10b0ed0a3
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import logging
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Type

import pytorch_lightning as pl  # type: ignore
from d2go.config import CfgNode, temp_defrost, auto_scale_world_size
from d2go.runner import create_runner
from d2go.runner.callbacks.quantization import (
    QuantizationAwareTraining,
)
from d2go.runner.lightning_task import GeneralizedRCNNTask
from d2go.setup import basic_argument_parser
from d2go.utils.misc import dump_trained_model_configs
from detectron2.utils.events import EventStorage
from detectron2.utils.file_io import PathManager
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torch.distributed import get_rank

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("detectron2go.lightning.train_net")

FINAL_MODEL_CKPT = f"model_final{ModelCheckpoint.FILE_EXTENSION}"


@dataclass
class TrainOutput:
    output_dir: str
    accuracy: Optional[Dict[str, Any]] = None
    tensorboard_log_dir: Optional[str] = None
    model_configs: Optional[Dict[str, str]] = None


def maybe_override_output_dir(cfg: CfgNode, output_dir: Optional[str]) -> None:
    """Overrides the output directory if `output_dir` is not None."""
    if output_dir is not None and output_dir != cfg.OUTPUT_DIR:
        cfg.OUTPUT_DIR = output_dir
        logger.warning(
            f"Override cfg.OUTPUT_DIR ({cfg.OUTPUT_DIR}) to be the same as "
            f"output_dir {output_dir}"
        )


def _get_trainer_callbacks(cfg: CfgNode) -> List[Callback]:
    """Gets the trainer callbacks based on the given D2Go Config.

    Args:
        cfg: The normalized ConfigNode for this D2Go Task.

    Returns:
        A list of configured Callbacks to be used by the Lightning Trainer.
    """
    callbacks: List[Callback] = [
        LearningRateMonitor(logging_interval="step"),
        ModelCheckpoint(
            dirpath=cfg.OUTPUT_DIR,
            save_last=True,
        ),
    ]
    if cfg.QUANTIZATION.QAT.ENABLED:
        callbacks.append(QuantizationAwareTraining.from_config(cfg))
    return callbacks


def _get_accelerator(use_cpu: bool) -> str:
    return "ddp_cpu" if use_cpu else "ddp"


def get_trainer_params(cfg: CfgNode, num_machines: int, num_processes: int) -> Dict[str, Any]:
    use_cpu = cfg.MODEL.DEVICE.lower() == "cpu"
    return {
        # training loop is bounded by max steps, use a large max_epochs to make
        # sure max_steps is met first
        "max_epochs": 10 ** 8,
        "max_steps": cfg.SOLVER.MAX_ITER,
        "val_check_interval": cfg.TEST.EVAL_PERIOD
        if cfg.TEST.EVAL_PERIOD > 0
        else cfg.SOLVER.MAX_ITER,
        "num_nodes": num_machines,
        "gpus": None if use_cpu else num_processes,
        "num_processes": num_processes,
        "accelerator": _get_accelerator(use_cpu),
        "callbacks": _get_trainer_callbacks(cfg),
        "logger": TensorBoardLogger(save_dir=cfg.OUTPUT_DIR),
        "num_sanity_val_steps": 0,
        "progress_bar_refresh_rate": 10,
        "terminate_on_nan": True,
        "replace_sampler_ddp": False,
    }


def do_train(cfg: CfgNode, trainer: pl.Trainer, task: GeneralizedRCNNTask) -> Dict[str, str]:
    """Runs the training loop with given trainer and task.

    Args:
        cfg: The normalized ConfigNode for this D2Go Task.
        trainer: PyTorch Lightning trainer.
        task: Lightning module instance.

    Returns:
        A map of model name to trained model config path.
    """
    with EventStorage() as storage:
        task.storage = storage
        trainer.fit(task)
        final_ckpt = os.path.join(cfg.OUTPUT_DIR, FINAL_MODEL_CKPT)
        trainer.save_checkpoint(final_ckpt)  # for validation monitor

        trained_cfg = cfg.clone()
        with temp_defrost(trained_cfg):
            trained_cfg.MODEL.WEIGHTS = final_ckpt
        model_configs = dump_trained_model_configs(
            cfg.OUTPUT_DIR, {"model_final": trained_cfg}
        )
    return model_configs


def do_test(trainer: pl.Trainer, task: GeneralizedRCNNTask):
    """Runs the evaluation with a pre-trained model.

    Args:
        cfg: The normalized ConfigNode for this D2Go Task.
        trainer: PyTorch Lightning trainer.
        task: Lightning module instance.
    """
    with EventStorage() as storage:
        task.storage = storage
        trainer.test(task)


def main(
    cfg: CfgNode,
    output_dir: Optional[str] = None,
    task_cls: Type[GeneralizedRCNNTask] = GeneralizedRCNNTask,
    eval_only: bool = False,
    num_machines: int = 1,
    num_processes: int = 1,
) -> TrainOutput:
    """Main function for launching a training with lightning trainer
    Args:
        cfg: D2go config node
        num_machines: Number of nodes used for distributed training
        num_processes: Number of processes on each node.
        eval_only: True if run evaluation only.
    """
    auto_scale_world_size(cfg, num_machines * num_processes)
    maybe_override_output_dir(cfg, output_dir)

    task = task_cls.from_config(cfg, eval_only)
    trainer_params = get_trainer_params(cfg, num_machines, num_processes)

    last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
    if PathManager.exists(last_checkpoint):
        # resume training from checkpoint
        trainer_params["resume_from_checkpoint"] = last_checkpoint
        logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")

    trainer = pl.Trainer(**trainer_params)
    model_configs = None
    if eval_only:
        do_test(trainer, task)
    else:
        model_configs = do_train(cfg, trainer, task)

    return TrainOutput(
        output_dir=cfg.OUTPUT_DIR,
        tensorboard_log_dir=trainer_params["logger"].log_dir,
        accuracy=task.eval_res,
        model_configs=model_configs,
    )


def build_config(
    config_file: str,
    task_cls: Type[GeneralizedRCNNTask],
    opts: Optional[List[str]] = None,
) -> CfgNode:
    """Build config node from config file
    Args:
        config_file: Path to a D2go config file
        output_dir: When given, this will override the OUTPUT_DIR in the config
        opts: A list of config overrides. e.g. ["SOLVER.IMS_PER_BATCH", "2"]
    """
    cfg = task_cls.get_default_cfg()
    cfg.merge_from_file(config_file)
    if opts:
        cfg.merge_from_list(opts)
    return cfg


def argument_parser():
    parser = basic_argument_parser(distributed=True, requires_output_dir=False)
    parser.add_argument(
        "--num-gpus", type=int, default=0, help="number of GPUs per machine"
    )
    return parser


if __name__ == "__main__":
    args = argument_parser().parse_args()
    task_cls = create_runner(args.runner) if args.runner else GeneralizedRCNNTask
    cfg = build_config(args.config_file, task_cls, args.opts)
    ret = main(
        cfg,
        args.output_dir,
        task_cls,
        eval_only=False,  # eval_only
        num_machines=args.num_machines,
        num_processes=args.num_processes,
    )
    if get_rank() == 0:
        print(ret)
py
1a3ab291758c26dbf5208b96becd668d335ab290
# Automated tests for the `executor' module. # # Author: Peter Odding <[email protected]> # Last Change: May 21, 2018 # URL: https://executor.readthedocs.io """ Automated tests for the `executor` package. This test suite uses ``sudo`` in several tests. If you don't have passwordless sudo configured you'll notice because you'll get interactive prompts when running the test suite ... Of course the idea behind a test suite is to run non-interactively, so in my personal development environment I have added a custom sudo configuration file ``/etc/sudoers.d/executor-test-suite`` with the following contents:: # /etc/sudoers.d/executor-test-suite: # # Configuration for sudo to run the executor test suite # without getting interactive sudo prompts. # To enable test_sudo_option. peter ALL=NOPASSWD:/bin/chmod 600 /tmp/executor-test-suite/* peter ALL=NOPASSWD:/bin/chown root\:root /tmp/executor-test-suite/* peter ALL=NOPASSWD:/bin/rm -R /tmp/executor-test-suite peter ALL=NOPASSWD:/usr/bin/stat --format=%a /tmp/executor-test-suite/* peter ALL=NOPASSWD:/usr/bin/stat --format=%G /tmp/executor-test-suite/* peter ALL=NOPASSWD:/usr/bin/stat --format=%U /tmp/executor-test-suite/* # To enable test_uid_option and test_user_option. The ALL=(ALL) tokens allow # running the command as any user (because the test suite picks user names # and user IDs more or less at random). peter ALL=(ALL) NOPASSWD:/usr/bin/id * If you want to use this, make sure you change the username and sanity check the locations of the executables whose pathnames have been expanded in the sudo configuration. Happy testing! By the way none of this is relevant on e.g. Travis CI because in that environment passwordless sudo access has been configured. """ # Standard library modules. import datetime import logging import os import pwd import random import shlex import socket import sys import tempfile import time import uuid # External dependencies. from humanfriendly import Timer, compact, dedent from humanfriendly.testing import TemporaryDirectory, TestCase, retry, run_cli from mock import MagicMock from six.moves import StringIO # Modules included in our package. from executor import ( COMMAND_NOT_FOUND_STATUS, DEFAULT_SHELL, DEFAULT_WORKING_DIRECTORY, CommandNotFound, ExternalCommand, ExternalCommandFailed, execute, quote, which, ) from executor.cli import main from executor.concurrent import CommandPool, CommandPoolFailed from executor.contexts import ( ChangeRootContext, create_context, LocalContext, RemoteContext, SecureChangeRootContext, ) from executor.process import ProcessTerminationFailed from executor.chroot import CHROOT_PROGRAM_NAME from executor.schroot import SCHROOT_PROGRAM_NAME from executor.ssh.client import ( DEFAULT_CONNECT_TIMEOUT, RemoteCommand, RemoteCommandFailed, RemoteCommandNotFound, RemoteConnectFailed, foreach, remote, ) from executor.ssh.server import SSHServer MISSING_COMMAND = 'a-program-name-that-no-one-would-ever-use' # Initialize a logger for this module. logger = logging.getLogger(__name__) class ExecutorTestCase(TestCase): """Container for the `executor` test suite.""" def setUp(self): """Set up logging for subprocesses and initialize test directories.""" # Set up our superclass. super(ExecutorTestCase, self).setUp() # Enable verbose logging to the terminal for subprocesses. os.environ['COLOREDLOGS_LOG_LEVEL'] = 'DEBUG' # Create the directory where superuser privileges are tested. 
self.sudo_enabled_directory = os.path.join(tempfile.gettempdir(), 'executor-test-suite') if not os.path.isdir(self.sudo_enabled_directory): os.makedirs(self.sudo_enabled_directory) def test_double_start(self): """Make sure a command can't be started when it's already running.""" with ExternalCommand('sleep', '1') as cmd: self.assertRaises(ValueError, cmd.start) def test_graceful_termination(self): """Test graceful termination of processes.""" self.check_termination(method='terminate') def test_forceful_termination(self): """Test forceful termination of processes.""" self.check_termination(method='kill') def test_graceful_to_forceful_fallback(self): """Test that graceful termination falls back to forceful termination.""" timer = Timer() expected_lifetime = 60 with NonGracefulCommand('sleep', str(expected_lifetime), check=False) as cmd: # Request graceful termination even though we know it will fail. cmd.terminate(timeout=1) # Verify that the process terminated even though our graceful # termination request was ignored. assert not cmd.is_running # Verify that the process actually terminated due to the fall back # and not because its expected life time simply ran out. assert timer.elapsed_time < expected_lifetime def test_process_termination_failure(self): """Test handling of forceful termination failures.""" with NonForcefulCommand('sleep', '60', check=False) as cmd: # Request forceful termination even though we know it will fail. self.assertRaises(ProcessTerminationFailed, cmd.kill, timeout=1) # Verify that the process is indeed still running :-). assert cmd.is_running # Bypass the overrides to get rid of the process. ExternalCommand.terminate_helper(cmd) def check_termination(self, method): """Helper method for process termination tests.""" with ExternalCommand('sleep', '60', check=False) as cmd: timer = Timer() # We use a positive but very low timeout so that all of the code # involved gets a chance to run, but without slowing us down. getattr(cmd, method)(timeout=0.1) # Gotcha: Call wait() so that the process (our own subprocess) is # reclaimed because until we do so proc.is_running will be True! cmd.wait() # Now we can verify our assertions. assert not cmd.is_running, "Child still running despite graceful termination request!" assert timer.elapsed_time < 10, "It look too long to terminate the child!" def test_iterate_buffered(self): """Make sure we can iterate over a command's buffered output.""" cmd = ExternalCommand( 'for ((i=0; i<10; i++)); do echo $i; done', capture=True, buffered=True, ) for i, line in enumerate(cmd): assert i == int(line) def test_iterate_unbuffered(self): """Make sure we can iterate over a command's unbuffered output.""" cmd = ExternalCommand( 'for ((i=0; i<10; i++)); do echo $i; done', capture=True, buffered=False, ) for i, line in enumerate(cmd): assert i == int(line) def test_program_searching(self): """Make sure which() works as expected.""" assert which('python') assert not which(MISSING_COMMAND) def test_status_code_checking(self): """Make sure that status code handling is sane.""" assert execute('true') is True assert execute('false', check=False) is False # Make sure execute('false') raises an exception. self.assertRaises(ExternalCommandFailed, execute, 'false') # Make sure execute('exit 42') raises an exception. shell_cmd = 'echo -n what; echo -n ever; exit 42' e = self.assertRaises(ExternalCommandFailed, execute, shell_cmd, silent=True) # Make sure the exception has the expected properties. 
self.assertEqual(e.command.command_line, ['bash', '-c', shell_cmd]) self.assertEqual(e.returncode, 42) self.assertTrue('whatever' in e.error_message) # Make sure the CommandNotFound exception is raised consistently # regardless of the values of the `shell' and `async' options. for async in True, False: for shell in True, False: cmd = ExternalCommand(MISSING_COMMAND, async=async, shell=shell) self.assertRaises(CommandNotFound, cmd.wait) assert cmd.returncode == COMMAND_NOT_FOUND_STATUS def test_shell_opt_out(self): """Test that callers can always opt out of shell evaluation.""" # A command consisting of a single string implies shell evaluation but # you can change that default. assert DEFAULT_SHELL in ExternalCommand('echo 42').command_line assert DEFAULT_SHELL not in ExternalCommand('echo 42', shell=False).command_line # A command consisting of more than one string implies no shell # evaluation but you can change that default. assert DEFAULT_SHELL not in ExternalCommand('echo', '42').command_line assert DEFAULT_SHELL in ExternalCommand('echo', '42', shell=True).command_line # Confirm that opting out of shell evaluation really bypasses all shells. cmd = ExternalCommand( 'echo this will never match an executable name', shell=False, check=False, ) cmd.start() cmd.wait() assert cmd.error_type is CommandNotFound def test_commands_on_stdin(self): """Test that callers can opt in to shell evaluation for local commands given on standard input.""" random_string = uuid.uuid4().hex output = execute(capture=True, shell=True, input='echo %s' % quote(random_string)) assert output == random_string def test_remote_commands_on_stdin(self): """Test that callers can opt in to shell evaluation for remote commands given on standard input.""" random_string = uuid.uuid4().hex with SSHServer() as server: output = remote('127.0.0.1', capture=True, shell=True, input='echo %s' % quote(random_string), **server.client_options) assert output == random_string def test_stdin(self): """Make sure standard input can be provided to external commands.""" assert execute('tr', 'a-z', 'A-Z', input='test', capture=True) == 'TEST' def test_stdout(self): """Make sure standard output of external commands can be captured.""" assert execute('echo', 'this is a test', capture=True) == 'this is a test' assert execute('echo', '-e', r'line 1\nline 2', capture=True) == 'line 1\nline 2\n' # I don't know how to test for the effect of silent=True in a practical # way without creating the largest test in this test suite :-). The # least I can do is make sure the keyword argument is accepted and the # code runs without exceptions in supported environments. 
assert execute('echo', 'this is a test', silent=True) is True def test_stderr(self): """Make sure standard error of external commands can be captured.""" stdout_value = 'this goes to standard output' stderr_value = 'and this goes to the standard error stream' shell_command = 'echo %s; echo %s >&2' % (stdout_value, stderr_value) cmd = ExternalCommand(shell_command, capture=True, capture_stderr=True) cmd.start() assert stdout_value in cmd.decoded_stdout assert stderr_value in cmd.decoded_stderr def test_output_on_error(self): """Check the combination of output capturing and error handling.""" for shell in True, False: cmd = ExternalCommand(MISSING_COMMAND, capture=True, check=False, shell=shell) cmd.start() cmd.wait() assert cmd.returncode == COMMAND_NOT_FOUND_STATUS assert cmd.stdout == b'' def test_merged_streams(self): """Make sure standard output/error of external commands can be captured together.""" stdout_value = 'this goes to standard output' stderr_value = 'and this goes to the standard error stream' shell_command = 'echo %s; echo %s >&2' % (stdout_value, stderr_value) cmd = ExternalCommand(shell_command, capture=True, merge_streams=True) cmd.start() assert stdout_value in cmd.decoded_stdout assert stderr_value in cmd.decoded_stdout assert stdout_value not in (cmd.decoded_stderr or '') assert stderr_value not in (cmd.decoded_stderr or '') def test_stdout_to_file(self): """Make sure the standard output stream of external commands can be redirected and appended to a file.""" fd, filename = tempfile.mkstemp(prefix='executor-', suffix='-stdout.txt') with open(filename, 'w') as handle: handle.write('existing contents\n') with open(filename, 'a') as handle: execute('echo appended output', stdout_file=handle) # Make sure the file was _not_ removed. assert os.path.isfile(filename) # Make sure the output was appended. with open(filename) as handle: lines = [line.strip() for line in handle] assert lines == ['existing contents', 'appended output'] def test_stderr_to_file(self): """Make sure the standard error stream of external commands can be redirected and appended to a file.""" fd, filename = tempfile.mkstemp(prefix='executor-', suffix='-stderr.txt') with open(filename, 'w') as handle: handle.write('existing contents\n') with open(filename, 'a') as handle: execute('echo appended output 1>&2', stderr_file=handle) # Make sure the file was _not_ removed. assert os.path.isfile(filename) # Make sure the output was appended. 
with open(filename) as handle: lines = [line.strip() for line in handle] assert lines == ['existing contents', 'appended output'] def test_redirect_without_fd(self): """Test redirection to a file object that doesn't have an associated file descriptor.""" mock_file = StringIO() setattr(mock_file, 'name', '/some/random/path') self.assertRaises(ValueError, execute, 'true', stdout_file=mock_file) def test_redirect_without_name(self): """Test redirection to a file object that doesn't have an associated filename.""" mock_file = StringIO() setattr(mock_file, 'fileno', lambda: 5) self.assertRaises(ValueError, execute, 'true', stdout_file=mock_file) def test_merged_streams_to_file(self): """Make sure the standard streams of external commands can be merged, redirected and appended to a file.""" fd, filename = tempfile.mkstemp(prefix='executor-', suffix='-merged.txt') with open(filename, 'w') as handle: handle.write('existing contents\n') with open(filename, 'a') as handle: execute('echo standard output; echo standard error 1>&2', stdout_file=handle, stderr_file=handle) # Make sure the file was _not_ removed. assert os.path.isfile(filename) # Make sure the output was appended. with open(filename) as handle: lines = [line.strip() for line in handle] assert lines == ['existing contents', 'standard output', 'standard error'] def test_asynchronous_stream_to_file(self): """Make sure the standard streams can be redirected to a file and asynchronously stream output to that file.""" fd, filename = tempfile.mkstemp(prefix='executor-', suffix='-streaming.txt') with open(filename, 'w') as handle: cmd = ExternalCommand('for ((i=0; i<25; i++)); do echo $i; sleep 0.1; done', async=True, stdout_file=handle) cmd.start() def expect_some_output(): """Expect some but not all output to be readable at some point.""" with open(filename) as handle: lines = list(handle) assert len(lines) > 0 assert len(lines) < 25 def expect_all_output(): """Expect all output to be readable at some point.""" with open(filename) as handle: lines = list(handle) assert len(lines) == 25 retry(expect_some_output, 10) retry(expect_all_output, 20) def test_asynchronous_unbuffered_output(self): """Make sure output buffering to temporary files can be disabled.""" cmd = ExternalCommand( *python_golf('import sys', 'sys.stdout.write(sys.stdin.readline().upper())', 'sys.stdout.flush()', 'sys.stdout.write(sys.stdin.readline().upper())'), async=True, buffered=False, capture=True, input=True ) with cmd: # Message the command. first_line = 'Hello world?\n' cmd.stdin.write(first_line.lower().encode('ascii')) # Read and check the response. assert cmd.stdout.readline().decode('ascii') == first_line.upper() # Message the command again. second_line = 'Are you still alive?\n' cmd.stdin.write(second_line.lower().encode('ascii')) assert cmd.stdout.readline().decode('ascii') == second_line.upper() def test_tty_option(self): """Make sure the ``tty`` option works as expected.""" # By default we expect the external command to inherit our standard # input stream (of course this test suite is expected to work # regardless of whether it's connected to a terminal). test_stdin_isatty = python_golf('import sys', 'sys.exit(0 if sys.stdin.isatty() else 1)') assert sys.stdin.isatty() == execute(*test_stdin_isatty, check=False) # If the command's output is being captured then its standard # input stream should be redirected to /dev/null. 
self.assertRaises(ExternalCommandFailed, execute, *test_stdin_isatty, capture=True) # If the caller explicitly disabled interactive terminal support then # the command's standard input stream should also be redirected to # /dev/null. self.assertRaises(ExternalCommandFailed, execute, *test_stdin_isatty, tty=False) def test_working_directory(self): """Make sure the working directory of external commands can be set.""" with TemporaryDirectory() as directory: self.assertEqual(execute('echo $PWD', capture=True, directory=directory), directory) def test_virtual_environment_option(self): """Make sure Python virtual environments can be used.""" with TemporaryDirectory() as directory: virtual_environment = os.path.join(directory, 'environment') # Create a virtual environment to run the command in. execute('virtualenv', virtual_environment) # This is the expected value of `sys.executable'. expected_executable = os.path.join(virtual_environment, 'bin', 'python') # Get the actual value of `sys.executable' by running a Python # interpreter inside the virtual environment. actual_executable = execute('python', '-c', 'import sys; print(sys.executable)', capture=True, virtual_environment=virtual_environment) # Make sure the values match. assert os.path.samefile(expected_executable, actual_executable) # Make sure that shell commands are also supported (command line # munging inside executor is a bit tricky and I specifically got # this wrong on the first attempt :-). output = execute('echo $VIRTUAL_ENV', capture=True, virtual_environment=virtual_environment) assert os.path.samefile(virtual_environment, output) def test_fakeroot_option(self): """Make sure ``fakeroot`` can be used.""" filename = os.path.join(tempfile.gettempdir(), 'executor-%s-fakeroot-test' % os.getpid()) self.assertTrue(execute('touch', filename, fakeroot=True)) try: self.assertTrue(execute('chown', 'root:root', filename, fakeroot=True)) self.assertEqual(execute('stat', '--format=%U', filename, fakeroot=True, capture=True), 'root') self.assertEqual(execute('stat', '--format=%G', filename, fakeroot=True, capture=True), 'root') self.assertTrue(execute('chmod', '600', filename, fakeroot=True)) self.assertEqual(execute('stat', '--format=%a', filename, fakeroot=True, capture=True), '600') finally: os.unlink(filename) def test_uid_option(self): """ Make sure ``sudo`` can be used to switch users based on a user ID. The purpose of this test is to switch to any user that is not root or the current user and verify that switching worked correctly. It's written this way because I wanted to make the least possible assumptions about the systems that will run this test suite. """ uids_to_ignore = (0, os.getuid()) entry = next(e for e in pwd.getpwall() if e.pw_uid not in uids_to_ignore) output = execute('id', '-u', capture=True, uid=entry.pw_uid) assert output == str(entry.pw_uid) def test_user_option(self): """ Make sure ``sudo`` can be used to switch users based on a username. The purpose of this test is to switch to any user that is not root or the current user and verify that switching worked correctly. It's written this way because I wanted to make the least possible assumptions about the systems that will run this test suite. 
""" uids_to_ignore = (0, os.getuid()) entry = next(e for e in pwd.getpwall() if e.pw_uid not in uids_to_ignore) output = execute('id', '-u', capture=True, user=entry.pw_name) assert output == str(entry.pw_uid) def test_sudo_option(self): """Make sure ``sudo`` can be used to elevate privileges.""" filename = os.path.join(self.sudo_enabled_directory, 'executor-%s-sudo-test' % os.getpid()) self.assertTrue(execute('touch', filename)) try: self.assertTrue(execute('chown', 'root:root', filename, sudo=True)) self.assertEqual(execute('stat', '--format=%U', filename, sudo=True, capture=True), 'root') self.assertEqual(execute('stat', '--format=%G', filename, sudo=True, capture=True), 'root') self.assertTrue(execute('chmod', '600', filename, sudo=True)) self.assertEqual(execute('stat', '--format=%a', filename, sudo=True, capture=True), '600') finally: self.assertTrue(execute('rm', '-R', self.sudo_enabled_directory, sudo=True)) def test_ionice_option(self): """Make sure ``ionice`` can be used.""" rsync_command_line = ['rsync', '-a', '/', '/mnt/backups/latest/'] expected_ionice_command = ['ionice', '--class', 'idle'] command = ExternalCommand(*rsync_command_line, ionice='idle') assert command.ionice == 'idle' print(command.ionice_command) assert command.ionice_command == expected_ionice_command assert command.command_line == (expected_ionice_command + rsync_command_line) self.assertRaises( ValueError, ExternalCommand, 'touch', 'something-inappropriate', ionice='unknown-class', ) def test_environment_variable_handling(self): """Make sure environment variables can be overridden.""" # Check that environment variables of the current process are passed on to subprocesses. output = execute('echo $PATH', capture=True) assert output == os.environ['PATH'] # Test that environment variable overrides can be given to external commands. output = execute( 'echo $HELLO $WORLD', capture=True, environment=dict( HELLO='Hello', WORLD='world!', ), ) assert output == 'Hello world!' # Test that the environment variables of a command can be modified # after the command has been initialized. cmd = ExternalCommand('echo $DELAYED', capture=True) cmd.environment['DELAYED'] = 'Also works fine' cmd.wait() assert cmd.output == 'Also works fine' def test_simple_async_cmd(self): """Make sure commands can be executed asynchronously.""" cmd = ExternalCommand('sleep 4', async=True) # Make sure we're starting from a sane state. assert not cmd.was_started assert not cmd.is_running assert not cmd.is_finished # Start the external command. cmd.start() def assert_running(): """ Make sure command switches to running state within a reasonable time. This is sensitive to timing issues on slow or overloaded systems, the retry logic is there to make the test pass as quickly as possible while still allowing for some delay. """ assert cmd.was_started assert cmd.is_running assert not cmd.is_finished retry(assert_running, timeout=4) # Wait for the external command to finish. cmd.wait() # Make sure we finished in a sane state. 
assert cmd.was_started assert not cmd.is_running assert cmd.is_finished assert cmd.returncode == 0 def test_async_with_input(self): """Make sure asynchronous commands can be provided standard input.""" random_file = os.path.join(tempfile.gettempdir(), 'executor-%s-async-input-test' % os.getpid()) random_value = str(random.random()) cmd = ExternalCommand('cat > %s' % quote(random_file), async=True, input=random_value) try: cmd.start() cmd.wait() assert os.path.isfile(random_file) with open(random_file) as handle: contents = handle.read() assert random_value == contents.strip() finally: if os.path.isfile(random_file): os.unlink(random_file) def test_async_with_output(self): """Make sure asynchronous command output can be captured.""" random_value = str(random.random()) cmd = ExternalCommand('echo %s' % quote(random_value), async=True, capture=True) cmd.start() cmd.wait() assert cmd.output == random_value def test_callback_evaluation(self): """Make sure result processing callbacks work as expected.""" result = execute('echo', str(time.time()), callback=self.coerce_timestamp) assert isinstance(result, datetime.datetime) def coerce_timestamp(self, cmd): """Callback for :func:`test_callback_evaluation()`.""" return datetime.datetime.fromtimestamp(float(cmd.output)) def test_event_callbacks(self): """Make sure the ``start_event`` and ``finish_event`` callbacks are actually invoked.""" for async in True, False: results = [] cmd = ExternalCommand( 'sleep', '0.1', async=async, start_event=lambda cmd: results.append(('started', time.time())), finish_event=lambda cmd: results.append(('finished', time.time())), ) cmd.start() mapping = dict(results) assert 'started' in mapping cmd.wait() mapping = dict(results) assert 'finished' in mapping assert mapping['finished'] > mapping['started'] def test_repr(self): """Make sure that repr() on external commands gives sane output.""" cmd = ExternalCommand('echo 42', async=True, capture=True, directory='/', environment={'my_environment_variable': '42'}) assert repr(cmd).startswith('ExternalCommand(') assert repr(cmd).endswith(')') assert 'echo 42' in repr(cmd) assert 'async=True' in repr(cmd) assert ('directory=%r' % '/') in repr(cmd) assert 'my_environment_variable' in repr(cmd) assert 'was_started=False' in repr(cmd) assert 'is_running=False' in repr(cmd) assert 'is_finished=False' in repr(cmd) cmd.start() def assert_finished(): """Allow for some delay before the external command finishes.""" assert 'was_started=True' in repr(cmd) assert 'is_running=False' in repr(cmd) assert 'is_finished=True' in repr(cmd) retry(assert_finished, 10) def test_retry(self): """Check that failing commands can be retried until they succeed.""" with TemporaryDirectory() as directory: script = self.create_retry_script(directory, 5) cmd = ExternalCommand(script, retry=True, retry_limit=10, shell=False) cmd.start() assert cmd.retry_count == 4 assert cmd.returncode == 0 def test_retry_limit(self): """Check that failing commands aren't retried indefinitely.""" with TemporaryDirectory() as directory: script = self.create_retry_script(directory, 5) cmd = ExternalCommand(script, check=False, retry=True, retry_limit=2, shell=False) cmd.start() assert cmd.retry_count == 2 assert cmd.returncode == 42 def create_retry_script(self, directory, iterations=2): """Create a script that fails until the fifth run :-).""" unique_name = uuid.uuid4().hex script = os.path.join(directory, '%s.sh' % unique_name) data_file = os.path.join(directory, '%s.txt' % unique_name) with open(script, 'w') as handle: 
handle.write(dedent(''' #!/bin/bash -e ITERATION=$(cat {data_file} 2>/dev/null || echo 1) echo $(($ITERATION + 1)) > {data_file} if [ $ITERATION -ge {limit} ]; then exit 0 else exit 42 fi ''', data_file=data_file, limit=iterations)) os.chmod(script, 0o777) return script def test_command_pool(self): """Make sure command pools actually run multiple commands in parallel.""" num_commands = 10 sleep_time = 4 pool = CommandPool(5) for i in range(num_commands): pool.add(ExternalCommand('sleep %i' % sleep_time)) timer = Timer() results = pool.run() assert all(cmd.returncode == 0 for cmd in results.values()) assert timer.elapsed_time < (num_commands * sleep_time) def test_command_pool_resumable(self): """Make sure command pools can be resumed after raising exceptions.""" pool = CommandPool() # Prepare two commands that will both raise an exception. c1 = ExternalCommand('exit 1', check=True) c2 = ExternalCommand('exit 42', check=True) # Add the commands to the pool and start them. pool.add(c1) pool.add(c2) pool.spawn() # Wait for both commands to finish. while not pool.is_finished: time.sleep(0.1) # The first call to collect() should raise an exception about `exit 1'. e1 = intercept(ExternalCommandFailed, pool.collect) assert e1.command is c1 # The second call to collect() should raise an exception about `exit 42'. e2 = intercept(ExternalCommandFailed, pool.collect) assert e2.command is c2 def test_command_pool_retry(self): """Make sure command pools can retry failing commands.""" with TemporaryDirectory() as directory: pool = CommandPool(concurrency=2, delay_checks=True) # Create a shell script that succeeds on the second run and retry # it exactly once. We expect this command to have succeeded when # the command pool is finished. script_1 = self.create_retry_script(directory, 2) command_1 = ExternalCommand(script_1, async=True, retry=True, retry_limit=1) pool.add(command_1) # Create a shell script that succeeds on the fourth run and retry # it up to two times. We expect this command to have failed when # the command pool is finished. script_2 = self.create_retry_script(directory, 4) command_2 = ExternalCommand(script_2, async=True, retry=True, retry_limit=2) pool.add(command_2) # Include a command without retries that succeeds. command_3 = ExternalCommand('true', async=True, retry=False) pool.add(command_3) # Include a command without retries that fails. command_4 = ExternalCommand('false', async=True, retry=False) pool.add(command_4) # Run the commands in the pool, expecting an `CommandPoolFailed' # exception because the second command will fail despite retrying # and the fourth command fails on its first and only run. self.assertRaises(CommandPoolFailed, pool.run) # Check that the first command succeeded (with a retry). assert command_1.succeeded assert command_1.retry_count == 1 # Check that the second command failed (with retries). assert command_2.failed assert command_2.retry_count == 2 # Check that the third command succeeded (without retries). assert command_3.succeeded assert command_3.retry_count == 0 # Check that the fourth command failed (without retries). assert command_4.failed assert command_4.retry_count == 0 def test_command_pool_termination(self): """Make sure command pools can be terminated on failure.""" pool = CommandPool() # Include a command that just sleeps for a minute. sleep_cmd = ExternalCommand('sleep 60') pool.add(sleep_cmd) # Include a command that immediately exits with a nonzero return code. 
pool.add(ExternalCommand('exit 1', check=True)) # Start the command pool and terminate it as soon as the control flow # returns to us (because `exit 1' causes an exception to be raised). try: pool.run() assert False, "Assumed CommandPool.run() to raise ExternalCommandFailed!" except ExternalCommandFailed as e: # Make sure the exception was properly tagged. assert e.pool == pool # Make sure the sleep command was terminated. assert sleep_cmd.is_terminated def test_command_pool_delay_checks(self): """Make sure command pools can delay error checking until all commands have finished.""" pool = CommandPool(delay_checks=True) # Include a command that fails immediately. pool.add(ExternalCommand('exit 1', check=True)) # Include some commands that just sleep for a while. pool.add(ExternalCommand('sleep 1', check=True)) pool.add(ExternalCommand('sleep 2', check=True)) pool.add(ExternalCommand('sleep 3', check=True)) # Make sure the expected exception is raised. self.assertRaises(CommandPoolFailed, pool.run) # Make sure all commands were started. assert all(cmd.was_started for id, cmd in pool.commands) # Make sure all commands finished. assert all(cmd.is_finished for id, cmd in pool.commands) def test_command_pool_delay_checks_noop(self): """Make sure command pools with delayed error checking don't raise when ``check=False``.""" pool = CommandPool(delay_checks=True) # Include a command that fails immediately. pool.add(ExternalCommand('exit 1', check=False)) # Run the command pool without catching exceptions; we don't except any. pool.run() # Make sure the command failed even though the exception wasn't raised. assert all(cmd.failed for id, cmd in pool.commands) def test_command_pool_logs_directory(self): """Make sure command pools can log output of commands in a directory.""" with TemporaryDirectory() as root_directory: identifiers = [1, 2, 3, 4, 5] sub_directory = os.path.join(root_directory, 'does-not-exist-yet') pool = CommandPool(concurrency=5, logs_directory=sub_directory) for i in identifiers: pool.add(identifier=i, command=ExternalCommand('echo %i' % i)) pool.run() files = os.listdir(sub_directory) assert sorted(files) == sorted(['%s.log' % i for i in identifiers]) for filename in files: with open(os.path.join(sub_directory, filename)) as handle: contents = handle.read() assert filename == ('%s.log' % contents.strip()) def test_concurrency_control_with_groups(self): """Make sure command pools support ``group_by`` for high level concurrency control.""" pool = CommandPool(concurrency=10) for i in range(10): pool.add(ExternalCommand('sleep 0.1', group_by='group-a')) for i in range(10): pool.add(ExternalCommand('sleep 0.1', group_by='group-b')) while not pool.is_finished: pool.spawn() # Make sure we never see more than two commands running at the same # time (because the commands are spread over two command groups). 
assert pool.num_running <= 2 pool.collect() def test_concurrency_control_with_dependencies(self): """Make sure command pools support ``dependencies`` for low level concurrency control.""" pool = CommandPool(concurrency=10) group_one = [ExternalCommand('sleep 0.1') for i in range(5)] group_two = [ExternalCommand('sleep 0.1', dependencies=group_one) for i in range(5)] group_three = [ExternalCommand('sleep 0.1', dependencies=group_two) for i in range(5)] for group in group_one, group_two, group_three: for cmd in group: pool.add(cmd) while not pool.is_finished: pool.spawn() # Make sure we never see more than one group of commands running at # the same time (because we've set up the dependencies like this). assert pool.num_running <= 5 pool.collect() def test_ssh_user_at_host(self): """Make sure a username can be injected via an SSH alias.""" cmd = RemoteCommand('root@host', 'true') assert cmd.ssh_user == 'root' assert cmd.ssh_alias == 'host' assert cmd.have_superuser_privileges def test_ssh_command_lines(self): """Make sure SSH client command lines are correctly generated.""" # Construct a remote command using as much defaults as possible and # validate the resulting SSH client program command line. cmd = RemoteCommand('localhost', 'true', ssh_user='some-random-user') for token in ( 'ssh', '-o', 'BatchMode=yes', '-o', 'ConnectTimeout=%i' % DEFAULT_CONNECT_TIMEOUT, '-o', 'StrictHostKeyChecking=no', '-l', 'some-random-user', 'localhost', 'true', ): assert token in tokenize_command_line(cmd) # Make sure compression can be enabled. assert '-C' in \ RemoteCommand('localhost', 'date', compression=True).command_line # Make sure batch mode can be disabled. assert 'BatchMode=no' in \ RemoteCommand('localhost', 'date', batch_mode=False).command_line # Make sure the connection timeout can be configured. assert 'ConnectTimeout=42' in \ RemoteCommand('localhost', 'date', connect_timeout=42).command_line # Make sure the SSH client program command can be configured. assert 'Compression=yes' in \ RemoteCommand('localhost', 'date', ssh_command=['ssh', '-o', 'Compression=yes']).command_line # Make sure the known hosts file can be ignored. cmd = RemoteCommand('localhost', 'date', ignore_known_hosts=True) assert cmd.ignore_known_hosts cmd.ignore_known_hosts = False assert not cmd.ignore_known_hosts # Make sure strict host key checking can be enabled. assert 'StrictHostKeyChecking=yes' in \ RemoteCommand('localhost', 'date', strict_host_key_checking=True).command_line assert 'StrictHostKeyChecking=yes' in \ RemoteCommand('localhost', 'date', strict_host_key_checking='yes').command_line # Make sure host key checking can be set to prompt the operator. assert 'StrictHostKeyChecking=ask' in \ RemoteCommand('localhost', 'date', strict_host_key_checking='ask').command_line # Make sure strict host key checking can be disabled. assert 'StrictHostKeyChecking=no' in \ RemoteCommand('localhost', 'date', strict_host_key_checking=False).command_line assert 'StrictHostKeyChecking=no' in \ RemoteCommand('localhost', 'date', strict_host_key_checking='no').command_line # Make sure fakeroot and sudo requests are honored. 
assert 'fakeroot' in \ tokenize_command_line(RemoteCommand('localhost', 'date', fakeroot=True)) assert 'sudo' in \ tokenize_command_line(RemoteCommand('localhost', 'date', sudo=True)) assert 'sudo' not in \ tokenize_command_line(RemoteCommand('localhost', 'date', ssh_user='root', sudo=True)) def test_ssh_unreachable(self): """Make sure a specific exception is raised when ``ssh`` fails to connect.""" # Make sure invalid SSH aliases raise the expected type of exception. self.assertRaises( RemoteConnectFailed, remote, 'this.domain.surely.wont.exist.right', 'date', silent=True, ) def test_remote_command_missing(self): """Make sure a specific exception is raised when a remote command is missing.""" with SSHServer() as server: self.assertRaises( RemoteCommandNotFound, remote, '127.0.0.1', MISSING_COMMAND, **server.client_options ) def test_remote_working_directory(self): """Make sure remote working directories can be set.""" with SSHServer() as server: with TemporaryDirectory() as some_random_directory: output = remote('127.0.0.1', 'pwd', capture=True, directory=some_random_directory, **server.client_options) assert output == some_random_directory def test_remote_error_handling(self): """Make sure remote commands preserve exit codes.""" with SSHServer() as server: cmd = RemoteCommand('127.0.0.1', 'exit 42', **server.client_options) self.assertRaises(RemoteCommandFailed, cmd.start) def test_foreach(self): """Make sure remote command pools work.""" with SSHServer() as server: ssh_aliases = ['127.0.0.%i' % i for i in (1, 2, 3, 4, 5, 6, 7, 8)] results = foreach(ssh_aliases, 'echo $SSH_CONNECTION', concurrency=3, capture=True, **server.client_options) assert sorted(ssh_aliases) == sorted(cmd.ssh_alias for cmd in results) assert len(ssh_aliases) == len(set(cmd.output for cmd in results)) def test_foreach_with_logging(self): """Make sure remote command pools can log output.""" with TemporaryDirectory() as directory: ssh_aliases = ['127.0.0.%i' % i for i in (1, 2, 3, 4, 5, 6, 7, 8)] with SSHServer() as server: foreach(ssh_aliases, 'echo $SSH_CONNECTION', concurrency=3, logs_directory=directory, capture=True, **server.client_options) log_files = os.listdir(directory) assert len(log_files) == len(ssh_aliases) assert all(os.path.getsize(os.path.join(directory, fn)) > 0 for fn in log_files) def test_chroot_command(self): """ Test support for chroot commands. For now this test doesn't actually run ``chroot`` because automating the creation of chroots using ``debootstrap`` just to run these tests is a lot of work that I haven't done (yet). """ chroot = '/var/lib/chroots/executor' chroot_group = 'my-group' chroot_user = 'my-user' command = ['echo', '42'] context = ChangeRootContext(chroot) cmd = context.prepare(*command, chroot_group=chroot_group, chroot_user=chroot_user) assert CHROOT_PROGRAM_NAME in cmd.command_line assert '--userspec=%s:%s' % (chroot_user, chroot_group) in cmd.command_line assert cmd.command_line[-len(command):] == command # Make sure sudo is avoided when possible. cmd = context.prepare('apt-get', 'update', sudo=True) command_in_chroot = cmd.command_line[cmd.command_line.index('chroot'):] assert 'sudo' not in command_in_chroot # Make sure sudo is used when necessary. cmd = context.prepare('apt-get', 'update', chroot_user='nobody', sudo=True) command_in_chroot = cmd.command_line[cmd.command_line.index('chroot'):] assert 'sudo' in command_in_chroot # Make sure the working directory is handled correctly. 
directory_in_chroot = '/relative/to/chroot' cmd = context.prepare('pwd', directory=directory_in_chroot) assert cmd.directory == DEFAULT_WORKING_DIRECTORY assert cmd.chroot_directory == directory_in_chroot assert any(directory_in_chroot in token for token in cmd.command_line) def test_schroot_command(self): """ Test support for schroot commands. For now this test doesn't actually run ``schroot`` because automating the installation of ``schroot`` and the creation of chroots using ``debootstrap`` just to run these tests is a lot of work that I haven't done (yet). """ chroot_name = 'name-of-chroot' chroot_user = 'user-in-chroot' chroot_directory = '/path/relative/to/chroot' command = ['echo', '42'] context = SecureChangeRootContext(chroot_name, chroot_directory=chroot_directory, chroot_user=chroot_user) cmd = context.prepare(*command) assert SCHROOT_PROGRAM_NAME in cmd.command_line assert ('--chroot=%s' % chroot_name) in cmd.command_line assert ('--user=%s' % chroot_user) in cmd.command_line assert ('--directory=%s' % chroot_directory) in cmd.command_line assert cmd.command_line[-len(command):] == command other_chroot_directory = '/other/path/relative/to/chroot' cmd.directory = other_chroot_directory assert cmd.directory == DEFAULT_WORKING_DIRECTORY assert cmd.chroot_directory == other_chroot_directory def test_create_context(self): """Test context creation.""" assert isinstance(create_context(), LocalContext) assert isinstance(create_context(ssh_alias=None), LocalContext) assert isinstance(create_context(ssh_alias='whatever'), RemoteContext) assert create_context(ssh_alias='whatever').ssh_alias == 'whatever' assert create_context(sudo=True).options['sudo'] is True assert create_context(sudo=False).options['sudo'] is False def test_lsb_release_shortcuts(self): """Test the ``lsb_release`` shortcuts.""" try: # The following tests should pass on my laptops and Travis CI. context = LocalContext() assert context.distributor_id == 'ubuntu' assert context.distribution_codename in ('precise', 'trusty', 'xenial') except AssertionError: # But I don't want this test to fail on `unexpected' # platforms so here's a pragmatic compromise :-). return self.skipTest("assuming unsupported platform") def test_lsb_release_error_handling(self): """Test that the ``lsb_release`` shortcuts don't raise exceptions on unsupported platforms.""" context = LocalContext(environment=dict(PATH='')) assert context.distributor_id == '' assert context.distribution_codename == '' def test_local_context(self): """Test a local command context.""" self.check_context(LocalContext()) def test_remote_context(self): """Test a remote command context.""" with SSHServer() as server: self.check_context(RemoteContext('127.0.0.1', **server.client_options)) def check_context(self, context): """Test a command execution context (whether local or remote).""" # Make sure __str__() does `something useful'. assert 'system' in str(context) # Make sure context.cpu_count is supported. assert context.cpu_count >= 1 # Test context.execute() and cleanup(). random_file = os.path.join(self.sudo_enabled_directory, uuid.uuid4().hex) # Prepare to test context.cleanup() with a callable. cleanup_callback = MagicMock() with context: # Schedule to invoke our callback before the with block ends. context.cleanup(cleanup_callback, 42, keyword='value') # Make sure the test directory exists. 
assert context.exists(self.sudo_enabled_directory) assert context.is_directory(self.sudo_enabled_directory) assert not context.is_file(self.sudo_enabled_directory) # Make sure the random file doesn't exist yet. assert not context.exists(random_file) # Create the random file. context.execute('touch', random_file) # Make sure the file was created. assert context.exists(random_file) assert context.is_file(random_file) assert not context.is_directory(random_file) # Make sure the file is readable and writable. assert context.is_readable(random_file) assert context.is_writable(random_file) # Make sure the file isn't executable. context.execute('chmod', '-x', random_file) assert not context.is_executable(random_file) # Make sure we can make the file executable. context.execute('chmod', '+x', random_file) assert context.is_executable(random_file) # Schedule to clean up the file. context.cleanup('rm', '-f', random_file) # Make sure the file hasn't actually been removed yet. assert context.exists(random_file) # Find the file using a filename pattern. matches = context.glob(os.path.join(self.sudo_enabled_directory, '*')) assert random_file in matches # The following tests only make sense when we're not already # running with superuser privileges. if os.getuid() != 0: # Revoke our privileges to the file. context.execute('chown', 'root:root', random_file, sudo=True) context.execute('chmod', '600', random_file, sudo=True) # Make sure the file is no longer readable or writable. assert not context.is_readable(random_file) assert not context.is_writable(random_file) # Make sure our cleanup callback was invoked correctly. assert cleanup_callback.called_with(42, keyword='value') # Make sure the file has been removed (__exit__). assert not context.exists(random_file) # Test context.capture(). assert context.capture('hostname') == socket.gethostname() # Test context.read_file() and context.write_file() and make sure they # are binary safe (i.e. they should be usable for non-text files). random_file = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) assert not os.path.exists(random_file) expected_contents = bytes(random.randint(0, 255) for i in range(25)) context.write_file(random_file, expected_contents) # Make sure the file was indeed created. assert os.path.exists(random_file) # Make sure the contents are correct. actual_contents = context.read_file(random_file) assert actual_contents == expected_contents # Test the happy path in context.atomic_write(). random_file = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) expected_contents = bytes(random.randint(0, 255) for i in range(25)) assert not context.exists(random_file) with context.atomic_write(random_file) as temporary_file: context.write_file(temporary_file, expected_contents) assert not context.exists(random_file) assert not context.exists(temporary_file) assert context.exists(random_file) assert context.read_file(random_file) == expected_contents # Test the failure handling in context.atomic_write(). random_file = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) try: assert not context.exists(random_file) with context.atomic_write(random_file) as temporary_file: context.write_file(temporary_file, '') assert context.exists(temporary_file) # Interrupt the `with' block by raising an exception. raise Exception except Exception: pass finally: # Make sure the temporary file was cleaned up. assert not context.exists(temporary_file) # Make sure the target file wasn't created. 
assert not context.exists(random_file) # Test context.list_entries() and make sure it doesn't # mangle filenames containing whitespace. nasty_filenames = [ 'something-innocent', 'now with spaces', 'and\twith\ttabs', 'and\nfinally\nnewlines', ] with TemporaryDirectory() as directory: # Create files with nasty names :-). for filename in nasty_filenames: with open(os.path.join(directory, filename), 'w') as handle: handle.write('\n') # List the directory entries. parsed_filenames = context.list_entries(directory) # Make sure all filenames were parsed correctly. assert sorted(nasty_filenames) == sorted(parsed_filenames) def test_cli_usage(self): """Make sure the command line interface properly presents its usage message.""" for arguments in [], ['-h'], ['--help']: returncode, output = run_cli(main, *arguments) assert returncode == 0 assert "Usage: executor" in output def test_cli_return_codes(self): """Make sure the command line interface doesn't swallow exit codes.""" returncode, output = run_cli(main, *python_golf('import sys', 'sys.exit(0)')) assert returncode == 0 returncode, output = run_cli(main, *python_golf('import sys', 'sys.exit(1)')) assert returncode == 1 returncode, output = run_cli(main, *python_golf('import sys', 'sys.exit(42)')) assert returncode == 42 def test_cli_fudge_factor(self, fudge_factor=5): """Try to ensure that the fudge factor applies (a bit tricky to get right) ...""" def fudge_factor_hammer(): timer = Timer() returncode, output = run_cli( main, '--fudge-factor=%i' % fudge_factor, *python_golf('import sys', 'sys.exit(0)') ) assert returncode == 0 assert timer.elapsed_time > (fudge_factor / 2.0) retry(fudge_factor_hammer, 60) def test_cli_exclusive_locking(self): """Ensure that exclusive locking works as expected.""" returncode, output = run_cli( main, '--exclusive', *python_golf('import sys', 'sys.exit(0)') ) assert returncode == 0 def test_cli_timeout(self): """Ensure that external commands can be timed out.""" def timeout_hammer(): timer = Timer() returncode, output = run_cli( main, '--timeout=5', *python_golf('import time', 'time.sleep(10)') ) assert returncode != 0 assert timer.elapsed_time < 10 retry(timeout_hammer, 60) def intercept(exc_type, func, *args, **kw): """Intercept and return a raised exception.""" try: func(*args, **kw) except exc_type as e: return e else: assert False, "Expected exception to be raised, but nothing happened! :-s" def tokenize_command_line(cmd): """Tokenize a command line string into a list of strings.""" return sum(map(shlex.split, cmd.command_line), []) def python_golf(*statements): """Generate a Python command line.""" return sys.executable, '-c', '; '.join(statements) class NonGracefulCommand(ExternalCommand): """Wrapper for :class:`~executor.process.ControllableProcess` that disables graceful termination.""" def terminate_helper(self, *args, **kw): """Swallow graceful termination signals.""" self.logger.debug(compact(""" Process termination using subprocess.Popen.terminate() intentionally disabled to simulate processes that refuse to terminate gracefully .. """)) class NonForcefulCommand(NonGracefulCommand): """Wrapper for :class:`~executor.process.ControllableProcess` that disables graceful and forceful termination.""" def kill_helper(self, *args, **kw): """Swallow forceful termination signals.""" self.logger.debug(compact(""" Process termination using subprocess.Popen.kill() intentionally disabled to simulate processes that refuse to terminate forcefully .. """))
py
1a3ab3ac303ac7f7a3265cfd16af05448f22dc60
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.utilities import memory
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel


@RunIf(min_gpus=2)
def test_multi_gpu_early_stop_ddp_spawn(tmpdir):
    tutils.set_random_main_port()

    trainer_options = dict(
        default_root_dir=tmpdir,
        callbacks=[EarlyStopping(monitor="train_acc")],
        max_epochs=50,
        limit_train_batches=10,
        limit_val_batches=10,
        gpus=[0, 1],
        strategy="ddp_spawn",
    )

    dm = ClassifDataModule()
    model = ClassificationModel()
    tpipes.run_model_test(trainer_options, model, dm)


@RunIf(min_gpus=2)
def test_multi_gpu_model_ddp_spawn(tmpdir):
    tutils.set_random_main_port()

    trainer_options = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=10,
        limit_val_batches=10,
        gpus=[0, 1],
        strategy="ddp_spawn",
        enable_progress_bar=False,
    )

    model = BoringModel()

    tpipes.run_model_test(trainer_options, model)

    # test memory helper functions
    memory.get_memory_profile("min_max")


@RunIf(min_gpus=2)
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
    """Make sure DDP works with dataloaders passed to fit()"""
    tutils.set_random_main_port()

    model = BoringModel()

    trainer = Trainer(
        default_root_dir=tmpdir,
        enable_progress_bar=False,
        max_epochs=1,
        limit_train_batches=0.2,
        limit_val_batches=0.2,
        gpus=[0, 1],
        strategy="ddp_spawn",
    )
    trainer.fit(model, train_dataloaders=model.train_dataloader(), val_dataloaders=model.val_dataloader())
    assert trainer.state.finished, "DDP doesn't work with dataloaders passed to fit()."
py
1a3ab5109a1c8a0de15cb802dfb3568a66f0b9fd
import numpy as np
import pandas as pd
from hamcrest import assert_that, has_item

import cifrum as lib

from conftest import decimal_places
from cifrum._settings import _MONTHS_PER_YEAR

__asset_name = 'index/OKID10'


def test__present_in_available_names():
    sym_ids = [x.fin_sym_id.format() for x in lib.available_names(namespace='index')]
    assert_that(sym_ids, has_item(__asset_name))


def test__have_valid_max_period_range():
    okid10 = lib.portfolio_asset(name=__asset_name)
    cbr_top10 = lib.information(name='cbr/TOP_rates')

    assert okid10.close().start_period == cbr_top10.start_period + _MONTHS_PER_YEAR
    assert (cbr_top10.end_period - okid10.close().end_period).n < 2


def test__have_valid_selected_period_range():
    start_period = pd.Period('2013-1', freq='M')
    end_period = pd.Period('2015-3', freq='M')
    okid10 = lib.portfolio_asset(name=__asset_name,
                                 start_period=str(start_period), end_period=str(end_period))

    assert okid10.close().start_period == start_period
    assert okid10.close().end_period == end_period


def test__have_correct_values():
    okid10 = lib.portfolio_asset(name=__asset_name, end_period='2018-12')
    np.testing.assert_almost_equal(okid10.close()[:5].values,
                                   [100., 100.9854, 101.9356, 102.8515, 103.7328], decimal_places)
    np.testing.assert_almost_equal(okid10.close()[-5:].values,
                                   [212.0694, 213.2737, 214.4767, 215.6832, 216.8961], decimal_places)


def test__compute_correctly_in_other_currencies():
    okid10_usd = lib.portfolio_asset(name=__asset_name, end_period='2018-12', currency='usd')
    okid10_rub = lib.portfolio_asset(name=__asset_name, end_period='2018-12', currency='rub')

    okid10_currency_rate = okid10_usd.close() / okid10_rub.close()

    vs_rub = lib.portfolio_asset(name='cbr/RUB',
                                 start_period=okid10_currency_rate.start_period,
                                 end_period=okid10_currency_rate.end_period,
                                 currency='usd').close()

    np.testing.assert_almost_equal(okid10_currency_rate.values, vs_rub.values, decimal_places)
py
1a3ab58c2150d02c01de99ac672a4b2c4796a7f3
# coding: utf-8 import pprint import re import six class ShowEventDataRequest: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'namespace': 'str', 'dim_0': 'str', 'dim_1': 'str', 'dim_2': 'str', 'dim_3': 'str', 'type': 'str', '_from': 'int', 'to': 'int' } attribute_map = { 'namespace': 'namespace', 'dim_0': 'dim.0', 'dim_1': 'dim.1', 'dim_2': 'dim.2', 'dim_3': 'dim.3', 'type': 'type', '_from': 'from', 'to': 'to' } def __init__(self, namespace=None, dim_0=None, dim_1=None, dim_2=None, dim_3=None, type=None, _from=None, to=None): """ShowEventDataRequest - a model defined in huaweicloud sdk""" self._namespace = None self._dim_0 = None self._dim_1 = None self._dim_2 = None self._dim_3 = None self._type = None self.__from = None self._to = None self.discriminator = None self.namespace = namespace self.dim_0 = dim_0 if dim_1 is not None: self.dim_1 = dim_1 if dim_2 is not None: self.dim_2 = dim_2 if dim_3 is not None: self.dim_3 = dim_3 self.type = type self._from = _from self.to = to @property def namespace(self): """Gets the namespace of this ShowEventDataRequest. :return: The namespace of this ShowEventDataRequest. :rtype: str """ return self._namespace @namespace.setter def namespace(self, namespace): """Sets the namespace of this ShowEventDataRequest. :param namespace: The namespace of this ShowEventDataRequest. :type: str """ self._namespace = namespace @property def dim_0(self): """Gets the dim_0 of this ShowEventDataRequest. :return: The dim_0 of this ShowEventDataRequest. :rtype: str """ return self._dim_0 @dim_0.setter def dim_0(self, dim_0): """Sets the dim_0 of this ShowEventDataRequest. :param dim_0: The dim_0 of this ShowEventDataRequest. :type: str """ self._dim_0 = dim_0 @property def dim_1(self): """Gets the dim_1 of this ShowEventDataRequest. :return: The dim_1 of this ShowEventDataRequest. :rtype: str """ return self._dim_1 @dim_1.setter def dim_1(self, dim_1): """Sets the dim_1 of this ShowEventDataRequest. :param dim_1: The dim_1 of this ShowEventDataRequest. :type: str """ self._dim_1 = dim_1 @property def dim_2(self): """Gets the dim_2 of this ShowEventDataRequest. :return: The dim_2 of this ShowEventDataRequest. :rtype: str """ return self._dim_2 @dim_2.setter def dim_2(self, dim_2): """Sets the dim_2 of this ShowEventDataRequest. :param dim_2: The dim_2 of this ShowEventDataRequest. :type: str """ self._dim_2 = dim_2 @property def dim_3(self): """Gets the dim_3 of this ShowEventDataRequest. :return: The dim_3 of this ShowEventDataRequest. :rtype: str """ return self._dim_3 @dim_3.setter def dim_3(self, dim_3): """Sets the dim_3 of this ShowEventDataRequest. :param dim_3: The dim_3 of this ShowEventDataRequest. :type: str """ self._dim_3 = dim_3 @property def type(self): """Gets the type of this ShowEventDataRequest. :return: The type of this ShowEventDataRequest. :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this ShowEventDataRequest. :param type: The type of this ShowEventDataRequest. :type: str """ self._type = type @property def _from(self): """Gets the _from of this ShowEventDataRequest. :return: The _from of this ShowEventDataRequest. :rtype: int """ return self.__from @_from.setter def _from(self, _from): """Sets the _from of this ShowEventDataRequest. :param _from: The _from of this ShowEventDataRequest. 
:type: int """ self.__from = _from @property def to(self): """Gets the to of this ShowEventDataRequest. :return: The to of this ShowEventDataRequest. :rtype: int """ return self._to @to.setter def to(self, to): """Sets the to of this ShowEventDataRequest. :param to: The to of this ShowEventDataRequest. :type: int """ self._to = to def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ShowEventDataRequest): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
py
1a3ab5cb0cf1fa53f68c7494497f4d8fe1edc495
# -*- coding: utf-8 -*- # Copyright (C) 2014-2016 Andrey Antukh <[email protected]> # Copyright (C) 2014-2016 Jesús Espino <[email protected]> # Copyright (C) 2014-2016 David Barragán <[email protected]> # Copyright (C) 2014-2016 Alejandro Alonso <[email protected]> # Copyright (C) 2014-2016 Anler Hernández <[email protected]> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.core.urlresolvers import reverse from taiga.base.utils import json from taiga.projects import choices as project_choices from taiga.projects.models import Project from taiga.projects.utils import attach_extra_info as attach_project_extra_info from taiga.projects.milestones.serializers import MilestoneSerializer from taiga.projects.milestones.models import Milestone from taiga.projects.milestones.utils import attach_extra_info as attach_milestone_extra_info from taiga.projects.notifications.services import add_watcher from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS from tests import factories as f from tests.utils import helper_test_http_method, disconnect_signals, reconnect_signals import pytest pytestmark = pytest.mark.django_db def setup_module(module): disconnect_signals() def teardown_module(module): reconnect_signals() @pytest.fixture def data(): m = type("Models", (object,), {}) m.registered_user = f.UserFactory.create() m.project_member_with_perms = f.UserFactory.create() m.project_member_without_perms = f.UserFactory.create() m.project_owner = f.UserFactory.create() m.other_user = f.UserFactory.create() m.public_project = f.ProjectFactory(is_private=False, anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)), public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)), owner=m.project_owner) m.public_project = attach_project_extra_info(Project.objects.all()).get(id=m.public_project.id) m.private_project1 = f.ProjectFactory(is_private=True, anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)), public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)), owner=m.project_owner) m.private_project1 = attach_project_extra_info(Project.objects.all()).get(id=m.private_project1.id) m.private_project2 = f.ProjectFactory(is_private=True, anon_permissions=[], public_permissions=[], owner=m.project_owner) m.private_project2 = attach_project_extra_info(Project.objects.all()).get(id=m.private_project2.id) m.blocked_project = f.ProjectFactory(is_private=True, anon_permissions=[], public_permissions=[], owner=m.project_owner, blocked_code=project_choices.BLOCKED_BY_STAFF) m.blocked_project = attach_project_extra_info(Project.objects.all()).get(id=m.blocked_project.id) m.public_membership = f.MembershipFactory( project=m.public_project, user=m.project_member_with_perms, role__project=m.public_project, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) m.private_membership1 = f.MembershipFactory( project=m.private_project1, user=m.project_member_with_perms, 
role__project=m.private_project1, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) f.MembershipFactory(project=m.private_project1, user=m.project_member_without_perms, role__project=m.private_project1, role__permissions=[]) m.private_membership2 = f.MembershipFactory( project=m.private_project2, user=m.project_member_with_perms, role__project=m.private_project2, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) f.MembershipFactory(project=m.private_project2, user=m.project_member_without_perms, role__project=m.private_project2, role__permissions=[]) m.blocked_membership = f.MembershipFactory( project=m.blocked_project, user=m.project_member_with_perms, role__project=m.blocked_project, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) f.MembershipFactory(project=m.blocked_project, user=m.project_member_without_perms, role__project=m.blocked_project, role__permissions=[]) f.MembershipFactory(project=m.public_project, user=m.project_owner, is_admin=True) f.MembershipFactory(project=m.private_project1, user=m.project_owner, is_admin=True) f.MembershipFactory(project=m.private_project2, user=m.project_owner, is_admin=True) f.MembershipFactory(project=m.blocked_project, user=m.project_owner, is_admin=True) m.public_milestone = f.MilestoneFactory(project=m.public_project) m.public_milestone = attach_milestone_extra_info(Milestone.objects.all()).get(id=m.public_milestone.id) m.private_milestone1 = f.MilestoneFactory(project=m.private_project1) m.private_milestone1 = attach_milestone_extra_info(Milestone.objects.all()).get(id=m.private_milestone1.id) m.private_milestone2 = f.MilestoneFactory(project=m.private_project2) m.private_milestone2 = attach_milestone_extra_info(Milestone.objects.all()).get(id=m.private_milestone2.id) m.blocked_milestone = f.MilestoneFactory(project=m.blocked_project) m.blocked_milestone = attach_milestone_extra_info(Milestone.objects.all()).get(id=m.blocked_milestone.id) return m def test_milestone_retrieve(client, data): public_url = reverse('milestones-detail', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-detail', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-detail', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-detail', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] results = helper_test_http_method(client, 'get', public_url, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url1, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url2, None, users) assert results == [401, 403, 403, 200, 200] results = helper_test_http_method(client, 'get', blocked_url, None, users) assert results == [401, 403, 403, 200, 200] def test_milestone_update(client, data): public_url = reverse('milestones-detail', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-detail', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-detail', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-detail', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] milestone_data = MilestoneSerializer(data.public_milestone).data 
milestone_data["name"] = "test" milestone_data = json.dumps(milestone_data) results = helper_test_http_method(client, 'put', public_url, milestone_data, users) assert results == [401, 403, 403, 200, 200] milestone_data = MilestoneSerializer(data.private_milestone1).data milestone_data["name"] = "test" milestone_data = json.dumps(milestone_data) results = helper_test_http_method(client, 'put', private_url1, milestone_data, users) assert results == [401, 403, 403, 200, 200] milestone_data = MilestoneSerializer(data.private_milestone2).data milestone_data["name"] = "test" milestone_data = json.dumps(milestone_data) results = helper_test_http_method(client, 'put', private_url2, milestone_data, users) assert results == [401, 403, 403, 200, 200] milestone_data = MilestoneSerializer(data.blocked_milestone).data milestone_data["name"] = "test" milestone_data = json.dumps(milestone_data) results = helper_test_http_method(client, 'put', blocked_url, milestone_data, users) assert results == [401, 403, 403, 451, 451] def test_milestone_delete(client, data): public_url = reverse('milestones-detail', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-detail', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-detail', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-detail', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, ] results = helper_test_http_method(client, 'delete', public_url, None, users) assert results == [401, 403, 403, 204] results = helper_test_http_method(client, 'delete', private_url1, None, users) assert results == [401, 403, 403, 204] results = helper_test_http_method(client, 'delete', private_url2, None, users) assert results == [401, 403, 403, 204] results = helper_test_http_method(client, 'delete', blocked_url, None, users) assert results == [401, 403, 403, 451] def test_milestone_list(client, data): url = reverse('milestones-list') response = client.get(url) milestones_data = json.loads(response.content.decode('utf-8')) assert len(milestones_data) == 2 assert response.status_code == 200 client.login(data.registered_user) response = client.get(url) milestones_data = json.loads(response.content.decode('utf-8')) assert len(milestones_data) == 2 assert response.status_code == 200 client.login(data.project_member_with_perms) response = client.get(url) milestones_data = json.loads(response.content.decode('utf-8')) assert len(milestones_data) == 4 assert response.status_code == 200 client.login(data.project_owner) response = client.get(url) milestones_data = json.loads(response.content.decode('utf-8')) assert len(milestones_data) == 4 assert response.status_code == 200 def test_milestone_create(client, data): url = reverse('milestones-list') users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] create_data = json.dumps({ "name": "test", "estimated_start": "2014-12-10", "estimated_finish": "2014-12-24", "project": data.public_project.pk, }) results = helper_test_http_method(client, 'post', url, create_data, users, lambda: Milestone.objects.all().delete()) assert results == [401, 403, 403, 201, 201] create_data = json.dumps({ "name": "test", "estimated_start": "2014-12-10", "estimated_finish": "2014-12-24", "project": data.private_project1.pk, }) results = helper_test_http_method(client, 'post', url, create_data, users, lambda: 
Milestone.objects.all().delete()) assert results == [401, 403, 403, 201, 201] create_data = json.dumps({ "name": "test", "estimated_start": "2014-12-10", "estimated_finish": "2014-12-24", "project": data.private_project2.pk, }) results = helper_test_http_method(client, 'post', url, create_data, users, lambda: Milestone.objects.all().delete()) assert results == [401, 403, 403, 201, 201] create_data = json.dumps({ "name": "test", "estimated_start": "2014-12-10", "estimated_finish": "2014-12-24", "project": data.blocked_project.pk, }) results = helper_test_http_method(client, 'post', url, create_data, users, lambda: Milestone.objects.all().delete()) assert results == [401, 403, 403, 451, 451] def test_milestone_patch(client, data): public_url = reverse('milestones-detail', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-detail', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-detail', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-detail', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] patch_data = json.dumps({"name": "test"}) results = helper_test_http_method(client, 'patch', public_url, patch_data, users) assert results == [401, 403, 403, 200, 200] patch_data = json.dumps({"name": "test"}) results = helper_test_http_method(client, 'patch', private_url1, patch_data, users) assert results == [401, 403, 403, 200, 200] patch_data = json.dumps({"name": "test"}) results = helper_test_http_method(client, 'patch', private_url2, patch_data, users) assert results == [401, 403, 403, 200, 200] patch_data = json.dumps({"name": "test"}) results = helper_test_http_method(client, 'patch', blocked_url, patch_data, users) assert results == [401, 403, 403, 451, 451] def test_milestone_action_stats(client, data): public_url = reverse('milestones-stats', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-stats', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-stats', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-stats', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] results = helper_test_http_method(client, 'get', public_url, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url1, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url2, None, users) assert results == [401, 403, 403, 200, 200] results = helper_test_http_method(client, 'get', blocked_url, None, users) assert results == [401, 403, 403, 200, 200] def test_milestone_action_watch(client, data): public_url = reverse('milestones-watch', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-watch', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-watch', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-watch', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] results = helper_test_http_method(client, 'post', public_url, "", users) assert results == [401, 200, 200, 200, 200] results = helper_test_http_method(client, 'post', 
private_url1, "", users) assert results == [401, 200, 200, 200, 200] results = helper_test_http_method(client, 'post', private_url2, "", users) assert results == [404, 404, 404, 200, 200] results = helper_test_http_method(client, 'post', blocked_url, "", users) assert results == [404, 404, 404, 451, 451] def test_milestone_action_unwatch(client, data): public_url = reverse('milestones-unwatch', kwargs={"pk": data.public_milestone.pk}) private_url1 = reverse('milestones-unwatch', kwargs={"pk": data.private_milestone1.pk}) private_url2 = reverse('milestones-unwatch', kwargs={"pk": data.private_milestone2.pk}) blocked_url = reverse('milestones-unwatch', kwargs={"pk": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] results = helper_test_http_method(client, 'post', public_url, "", users) assert results == [401, 200, 200, 200, 200] results = helper_test_http_method(client, 'post', private_url1, "", users) assert results == [401, 200, 200, 200, 200] results = helper_test_http_method(client, 'post', private_url2, "", users) assert results == [404, 404, 404, 200, 200] results = helper_test_http_method(client, 'post', blocked_url, "", users) assert results == [404, 404, 404, 451, 451] def test_milestone_watchers_list(client, data): public_url = reverse('milestone-watchers-list', kwargs={"resource_id": data.public_milestone.pk}) private_url1 = reverse('milestone-watchers-list', kwargs={"resource_id": data.private_milestone1.pk}) private_url2 = reverse('milestone-watchers-list', kwargs={"resource_id": data.private_milestone2.pk}) blocked_url = reverse('milestone-watchers-list', kwargs={"resource_id": data.blocked_milestone.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] results = helper_test_http_method(client, 'get', public_url, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url1, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url2, None, users) assert results == [401, 403, 403, 200, 200] results = helper_test_http_method(client, 'get', blocked_url, None, users) assert results == [401, 403, 403, 200, 200] def test_milestone_watchers_retrieve(client, data): add_watcher(data.public_milestone, data.project_owner) public_url = reverse('milestone-watchers-detail', kwargs={"resource_id": data.public_milestone.pk, "pk": data.project_owner.pk}) add_watcher(data.private_milestone1, data.project_owner) private_url1 = reverse('milestone-watchers-detail', kwargs={"resource_id": data.private_milestone1.pk, "pk": data.project_owner.pk}) add_watcher(data.private_milestone2, data.project_owner) private_url2 = reverse('milestone-watchers-detail', kwargs={"resource_id": data.private_milestone2.pk, "pk": data.project_owner.pk}) add_watcher(data.blocked_milestone, data.project_owner) blocked_url = reverse('milestone-watchers-detail', kwargs={"resource_id": data.blocked_milestone.pk, "pk": data.project_owner.pk}) users = [ None, data.registered_user, data.project_member_without_perms, data.project_member_with_perms, data.project_owner ] results = helper_test_http_method(client, 'get', public_url, None, users) assert results == [200, 200, 200, 200, 200] results = helper_test_http_method(client, 'get', private_url1, None, users) assert results == [200, 200, 200, 200, 200] results = 
helper_test_http_method(client, 'get', private_url2, None, users) assert results == [401, 403, 403, 200, 200] results = helper_test_http_method(client, 'get', blocked_url, None, users) assert results == [401, 403, 403, 200, 200]
py
1a3ab6211a2a26fbf939f448ff879c9ee84ed5c3
# coding=utf-8
import unittest

from loganalysis.log import Log
from loganalysis.log import LogFile


class TestLog(unittest.TestCase):
    '''Unit tests for the Log class.'''

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass
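
# Illustrative addition (not part of the original skeleton): a conventional
# unittest entry point so this test module can also be run directly, e.g.
# `python test_log.py` (the file name is an assumption).
if __name__ == '__main__':
    unittest.main()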
py
1a3ab6da2db9665a7346da7c47be30a96ec79427
class RelationshipTypes(object): @classmethod def types(cls): types = {} for k, rt in RELATIONSHIP_TYPES.items(): types[k] = rt.copy() types[k].update({ 'relationship_type': k }) return types @classmethod def get_type(cls, relationship_type_id): return cls.types().get(relationship_type_id, None) @classmethod def valid_relationship_hash(cls, relationship_type, related_model, endpoint): return dict( relationship_type=relationship_type, related_model=related_model, related_model_endpoint=endpoint) @classmethod def valid_relationship(cls, obj_type, name, rel): if 'symmetric' in rel and rel['symmetric']: if rel['source_type'] == obj_type and rel['target_type'] == obj_type: return cls.valid_relationship_hash(name, obj_type, 'both') else: if rel['source_type'] == obj_type: return cls.valid_relationship_hash( name, rel['target_type'], 'destination') if rel['target_type'] == obj_type: return cls.valid_relationship_hash( name, rel['source_type'], 'source') @classmethod def valid_relationship_helper(cls, obj_type): return [ cls.valid_relationship(obj_type, name, rel) for name, rel in cls.types().items()] @classmethod def valid_relationships(cls, obj_type): if not isinstance(obj_type, (str, unicode)): if not isinstance(obj_type, type): obj_type = obj_type.__class__ obj_type = obj_type.__name__ return [vr for vr in cls.valid_relationship_helper(obj_type) if vr] RELATIONSHIP_TYPES = { 'data_asset_has_process': { 'source_type': "DataAsset", 'target_type': "Process", 'forward_phrase': "has", 'reverse_phrase': "is a process for", 'forward_description': "This data asset relies upon the following processes.", 'reverse_description': "This process supports the following data assets." }, 'data_asset_relies_upon_data_asset': { 'source_type': "DataAsset", 'target_type': "DataAsset", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This data asset relies upon the following data assets.", 'reverse_description': "This data asset supports the following data assets." }, 'data_asset_relies_upon_facility': { 'source_type': "DataAsset", 'target_type': "Facility", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This data asset relies upon the following facilities.", 'reverse_description': "This facility supports the following data assets." }, 'data_asset_relies_upon_system': { 'source_type': "DataAsset", 'target_type': "System", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This data asset relies upon the following systems.", 'reverse_description': "This system supports the following data assets." }, 'facility_has_process': { 'source_type': "Facility", 'target_type': "Process", 'forward_phrase': "has", 'reverse_phrase': "is a process for", 'forward_description': "This facility relies upon the following processes.", 'reverse_description': "This process supports the following facilities." }, 'facility_relies_upon_data_asset': { 'source_type': "Facility", 'target_type': "DataAsset", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This facility relies upon the following data assets.", 'reverse_description': "This data asset supports the following facilities." }, 'facility_relies_upon_facility': { 'source_type': "Facility", 'target_type': "Facility", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This facility relies upon the following facilities.", 'reverse_description': "This facility supports the following facilities." 
}, 'facility_relies_upon_system': { 'source_type': "Facility", 'target_type': "System", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This facility relies upon the following systems.", 'reverse_description': "This system supports the following facilities." }, 'market_has_process': { 'source_type': "Market", 'target_type': "Process", 'forward_phrase': "has", 'reverse_phrase': "is a process for", 'forward_description': "This market relies upon the following processes.", 'reverse_description': "This process supports the following markets." }, 'market_includes_market': { 'source_type': "Market", 'target_type': "Market", 'forward_phrase': "includes", 'reverse_phrase': "is included in", 'forward_description': "This market includes the following markets.", 'reverse_description': "This market is included in the following markets." }, 'market_relies_upon_data_asset': { 'source_type': "Market", 'target_type': "DataAsset", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This market relies upon the following data assets.", 'reverse_description': "This data asset supports the following markets." }, 'market_relies_upon_facility': { 'source_type': "Market", 'target_type': "Facility", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This market relies upon the following facilities.", 'reverse_description': "This facility supports the following markets." }, 'market_relies_upon_system': { 'source_type': "Market", 'target_type': "System", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This market relies upon the following systems.", 'reverse_description': "This system supports the following markets." }, 'org_group_has_process': { 'source_type': "OrgGroup", 'target_type': "Process", 'forward_phrase': "has", 'reverse_phrase': "is a process for", 'forward_description': "This org group relies upon the following processes.", 'reverse_description': "This process supports the following org groups." }, 'org_group_is_affiliated_with_org_group': { 'source_type': "OrgGroup", 'target_type': "OrgGroup", 'symmetric': True, 'forward_phrase': "is affiliated/collaborates with", 'reverse_phrase': "is affiliated/collaborates with", 'forward_description': "This org group is affiliated/collaborates with the following org groups.", 'reverse_description': "This org group is affiliated/collaborates with the following org groups." }, 'org_group_is_responsible_for_data_asset': { 'source_type': "OrgGroup", 'target_type': "DataAsset", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following data assets.", 'reverse_description': "This data asset is overseen by the following org groups." }, 'org_group_is_responsible_for_facility': { 'source_type': "OrgGroup", 'target_type': "Facility", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following facilities.", 'reverse_description': "This facility is overseen by the following org groups." }, 'org_group_is_responsible_for_market': { 'source_type': "OrgGroup", 'target_type': "Market", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following markets.", 'reverse_description': "This market is overseen by the following org groups." 
}, 'org_group_is_responsible_for_org_group': { 'source_type': "OrgGroup", 'target_type': "OrgGroup", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following org groups.", 'reverse_description': "This org group is overseen by the following org groups." }, 'org_group_is_responsible_for_process': { 'source_type': "OrgGroup", 'target_type': "Process", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following processes.", 'reverse_description': "This process is overseen by the following org groups." }, 'org_group_is_responsible_for_product': { 'source_type': "OrgGroup", 'target_type': "Product", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following products.", 'reverse_description': "This product is overseen by the following org groups." }, 'org_group_is_responsible_for_project': { 'source_type': "OrgGroup", 'target_type': "Project", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following projects.", 'reverse_description': "This project is overseen by the following org groups." }, 'org_group_is_responsible_for_system': { 'source_type': "OrgGroup", 'target_type': "System", 'forward_phrase': "is responsible for", 'reverse_phrase': "is overseen by", 'forward_description': "This org group is responsible for the following systems.", 'reverse_description': "This system is overseen by the following org groups." }, 'org_group_relies_upon_data_asset': { 'source_type': "OrgGroup", 'target_type': "DataAsset", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This org group relies upon the following data assets.", 'reverse_description': "This data asset supports the following org groups." }, 'org_group_relies_upon_facility': { 'source_type': "OrgGroup", 'target_type': "Facility", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This org group relies upon the following facilities.", 'reverse_description': "This facility supports the following org groups." }, 'org_group_relies_upon_org_group': { 'source_type': "OrgGroup", 'target_type': "OrgGroup", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This org group relies upon the following org groups.", 'reverse_description': "This org group supports the following org groups." }, 'org_group_relies_upon_system': { 'source_type': "OrgGroup", 'target_type': "System", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This org group relies upon the following systems.", 'reverse_description': "This system supports the following org groups." }, 'product_has_process': { 'source_type': "Product", 'target_type': "Process", 'forward_phrase': "has", 'reverse_phrase': "is a process for", 'forward_description': "This product relies upon the following processes.", 'reverse_description': "This process supports the following products." 
}, 'product_is_affiliated_with_product': { 'source_type': "Product", 'target_type': "Product", 'symmetric': True, 'forward_phrase': "is affiliated/collaborates with", 'reverse_phrase': "is affiliated/collaborates with", 'forward_description': "This product is affiliated/collaborates with the following products.", 'reverse_description': "This product is affiliated/collaborates with the following products." }, 'product_is_sold_into_market': { 'source_type': "Product", 'target_type': "Market", 'forward_phrase': "is sold into", 'reverse_phrase': "is a market for", 'forward_description': "This product is sold into the following markets.", 'reverse_description': "This market is a market for the following products." }, 'product_relies_upon_data_asset': { 'source_type': "Product", 'target_type': "DataAsset", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This product relies upon the following data assets.", 'reverse_description': "This data asset supports the following products." }, 'product_relies_upon_facility': { 'source_type': "Product", 'target_type': "Facility", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This product relies upon the following facilities.", 'reverse_description': "This facility supports the following products." }, 'product_relies_upon_product': { 'source_type': "Product", 'target_type': "Product", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This product relies upon the following products.", 'reverse_description': "This product supports the following products." }, 'product_relies_upon_system': { 'source_type': "Product", 'target_type': "System", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This product relies upon the following systems.", 'reverse_description': "This system supports the following products." }, 'program_applies_to_data_asset': { 'source_type': "Program", 'target_type': "DataAsset", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following data assets.", 'reverse_description': "This data asset is within scope of the following programs." }, 'program_applies_to_facility': { 'source_type': "Program", 'target_type': "Facility", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following facilities.", 'reverse_description': "This facility is within scope of the following programs." }, 'program_applies_to_market': { 'source_type': "Program", 'target_type': "Market", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following markets.", 'reverse_description': "This market is within scope of the following programs." }, 'program_applies_to_org_group': { 'source_type': "Program", 'target_type': "OrgGroup", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following org groups.", 'reverse_description': "This org group is within scope of the following programs." }, 'program_applies_to_process': { 'source_type': "Program", 'target_type': "Process", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following processes.", 'reverse_description': "This process is within scope of the following programs." 
}, 'program_applies_to_product': { 'source_type': "Program", 'target_type': "Product", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following products.", 'reverse_description': "This product is within scope of the following programs." }, 'program_applies_to_project': { 'source_type': "Program", 'target_type': "Project", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following projects.", 'reverse_description': "This project is within scope of the following programs." }, 'program_applies_to_system': { 'source_type': "Program", 'target_type': "System", 'forward_phrase': "applies to", 'reverse_phrase': "is within scope of", 'forward_description': "This program applies to the following systems.", 'reverse_description': "This system is within scope of the following programs." }, 'project_has_process': { 'source_type': "Project", 'target_type': "Process", 'forward_phrase': "has", 'reverse_phrase': "is a process for", 'forward_description': "This project relies upon the following processes.", 'reverse_description': "This process supports the following projects." }, 'project_relies_upon_data_asset': { 'source_type': "Project", 'target_type': "DataAsset", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This project relies upon the following data assets.", 'reverse_description': "This data asset supports the following projects." }, 'project_relies_upon_facility': { 'source_type': "Project", 'target_type': "Facility", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This project relies upon the following facilities.", 'reverse_description': "This facility supports the following projects." }, 'project_relies_upon_system': { 'source_type': "Project", 'target_type': "System", 'forward_phrase': "relies upon", 'reverse_phrase': "supports", 'forward_description': "This project relies upon the following systems.", 'reverse_description': "This system supports the following projects." }, 'project_targets_data_asset': { 'source_type': "Project", 'target_type': "DataAsset", 'forward_phrase': "targets", 'reverse_phrase': "is targeted by", 'forward_description': "This project targets the following data assets.", 'reverse_description': "This data asset is targeted by the following projects." }, 'project_targets_facility': { 'source_type': "Project", 'target_type': "Facility", 'forward_phrase': "targets", 'reverse_phrase': "is targeted by", 'forward_description': "This project targets the following facilities.", 'reverse_description': "This facility is targeted by the following projects." }, 'project_targets_market': { 'source_type': "Project", 'target_type': "Market", 'forward_phrase': "targets", 'reverse_phrase': "is targeted by", 'forward_description': "This project targets the following markets.", 'reverse_description': "This market is targeted by the following projects." }, 'project_targets_org_group': { 'source_type': "Project", 'target_type': "OrgGroup", 'forward_phrase': "targets", 'reverse_phrase': "is targeted by", 'forward_description': "This project targets the following org groups.", 'reverse_description': "This org group is targeted by the following projects." 
}, 'project_targets_product': { 'source_type': "Project", 'target_type': "Product", 'forward_phrase': "targets", 'reverse_phrase': "is targeted by", 'forward_description': "This project targets the following products.", 'reverse_description': "This product is targeted by the following projects." }, 'risk_is_a_threat_to_data_asset': { 'source_type': "Risk", 'target_type': "DataAsset", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following data assets.", 'reverse_description': "This data asset is vulnerable to the following risks." }, 'risk_is_a_threat_to_facility': { 'source_type': "Risk", 'target_type': "Facility", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following facilities.", 'reverse_description': "This faciliy is vulnerable to the following risks." }, 'risk_is_a_threat_to_market': { 'source_type': "Risk", 'target_type': "Market", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following markets.", 'reverse_description': "This market is vulnerable to the following risks." }, 'risk_is_a_threat_to_org_group': { 'source_type': "Risk", 'target_type': "OrgGroup", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is not a threat to the following org groups.", 'reverse_description': "This org group is vulnerable to the following risks." }, 'risk_is_a_threat_to_process': { 'source_type': "Risk", 'target_type': "Process", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following processes.", 'reverse_description': "This process is vulnerable to the following risks." }, 'risk_is_a_threat_to_product': { 'source_type': "Risk", 'target_type': "Product", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following products.", 'reverse_description': "This product is vulnerable to the following risks." }, 'risk_is_a_threat_to_project': { 'source_type': "Risk", 'target_type': "Project", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following projects.", 'reverse_description': "This project is vulnerable to the following risks." }, 'risk_is_a_threat_to_system': { 'source_type': "Risk", 'target_type': "System", 'forward_phrase': "is a threat to", 'reverse_phrase': "is vulnerable to", 'forward_description': "This risk is a threat to the following systems.", 'reverse_description': "This system is vulnerable to the following risks." }, }
py
1a3ab6e628bfc63ec255d399607a790cd5ce7107
__author__ = "Katri Leino" __copyright__ = "Copyright (c) 2018, Aalto Speech Research" import pybrain from scipy import * import numpy as np import logging from UIEnv import UI, UITask from initialParams import initializeParams ''' ## Evaluation function Computes the KLM estimate for the policy. INPUT * av_table : Policy's Q-table. * ui_env : UI class object. Defines the environment * task : UITask class object. Defines the task. * goal : is goal achieved (True / False) * params : initializeParams class object that holds KLM and environment parameters. OUTPUT * KLM estimate (seconds) # best_path : A list of states in which order they are visited. ''' def evaluation(av_table, ui_env, task, goal, params): time_klm = 0 # Set environment parameters ui_env.reset() task.reset() current_state = ui_env.getSensors() # Save best path best_path = [0] # start with starting point steps = 0 time_klm = 0 prev_action = -1 while not(goal): action = av_table.getMaxAction(current_state) best_path.append(action+1) task.performAction(action) # Remove used action allowed_actions = np.where(np.array(task.env.visited_states) == 0)[0] av_table.setAllowedActions(allowed_actions) # Set allowed actions ID = -task.getReward() # ID for Fitts' Law time_klm = time_klm + params.fitts_a + params.fitts_b*ID + 0.31*100 current_state = ui_env.getSensors() task.prev_action = action # For optimization if steps > 10: time_klm = -1 # Discard whole UI print 'Policy not learned' print ui_env.env # Print visited states print av_table.params logging.warn('Policy not learned or UI is not allowed ') logging.warn(ui_env.env) logging.warn(av_table.params) return -1 steps = steps+1 prev_action = action if task.isFinished(): break best_path.append(ui_env.num_of_actions) # Last action is the confirmation ui_env.reset() task.reset() return time_klm/100, best_path
py
1a3abac3875e92baa5019ba3e1f68ce187793dde
#!/usr/bin/env python

from __future__ import print_function

import thread
import socket
import argparse
import sys, time, os, glob, shutil, math, datetime

from tmuxsend import TmuxSend


def run_server(port):
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    #sock.settimeout(3)

    # Bind the socket to the port
    server_address = ('', port)
    sock.bind(server_address)
    sock.listen(1)

    print("Speech server started on port %d ..." %port)
    print("Speech server commands available [@audio, @audiokill]")
    print("Example: echo \"@audio\" | netcat -w 1 localhost %d" %port)
    print("TTS command: echo \"TTS[en] hello!\" | netcat -w 1 localhost 9001")

    tmux = TmuxSend('bringup', ['audio server','cmd'])

    connected = False
    dorun = True
    while dorun:

        if not connected:
            print("-- Waiting for connection ...")

        while (dorun and not connected):
            try:
                # Wait for a connection
                connection, client_address = sock.accept()
                connected = True
                print ('-- Connection from %s' %client_address[0])
            except KeyboardInterrupt:
                print("User interrupt (quit)")
                dorun = False
            except Exception as e:
                print(e)
                pass  # keep listening

        if not dorun:
            return

        # print("-- Waiting for data...")
        data = None
        while dorun and connected and data is None:
            # receive data
            try:
                #connection.settimeout(3) # timeout when listening (exit with CTRL+C)
                data = connection.recv(320)  # blocking
                data = data.strip()
            except KeyboardInterrupt:
                print("User interrupt (quit)")
                dorun = False
            except socket.timeout:
                data = None
                print("socket timeout")

        if data is not None:
            if len(data)==0:
                connected = False
            else:
                print(data)
                folder = "~/src/marrtino_apps/audio"
                if data=='@audio':
                    tmux.cmd(0,'cd %s' %folder)
                    tmux.cmd(0,'python audio_server.py')
                elif data=='@audiokill':
                    tmux.Cc(0)
                else:
                    print('Unknown command %s' % data)


if __name__ == '__main__':

    default_port = 9239

    parser = argparse.ArgumentParser(description='speech bringup')
    parser.add_argument('-server_port', type=int, default=default_port, help='server port')
    args = parser.parse_args()

    run_server(args.server_port)
py
1a3abbfbadd37fb39574e9a0b778f7540581a417
#!/usr/bin/env python import click import json import os import shutil import subprocess import uuid LOGISTICIAN_ROOT = os.path.dirname(os.path.abspath(__file__)) CONFIG_PATH = os.path.expanduser("~/.logistician/") def random_id(): return str(uuid.uuid4()).split("-")[0] def write_to_file(path, contents): f = open(path, "w") f.write(contents) f.close() def from_template_file(template_file, vars): f = open(template_file, "r") template = f.read() f.close() return template % vars def create_config_directory(): if not os.path.exists(CONFIG_PATH): os.makedirs(CONFIG_PATH) def load_params(experiment_path): params_filename = os.path.join(experiment_path, "parameters.json") g = open(params_filename) params = json.load(g) g.close() return params def echo_command_string(s): click.secho(s, fg='green') def verbose_call(cmd): echo_command_string(subprocess.list2cmdline(cmd)) subprocess.call(cmd) def local_docker_command(params): return params.get("local_docker_command", "docker") def remote_docker_command(params): return params.get("remote_docker_command", "docker") def config(): """ Interactively create config file """ create_config_directory() docker_username = click.prompt("Please enter your Docker Hub username") docker_repository = click.prompt("Please enter your preferred Docker repository name", "experiments") aws_access_key = click.prompt("Please enter your AWS access key (e.g., AKIAIOSFODNN7EXAMPLE)") aws_secret_key = click.prompt("Please enter your AWS secret key (e.g., wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY)") configuration = { "aws_access_key": aws_access_key, "aws_secret_key": aws_secret_key, "docker_username": docker_username, "docker_repository": docker_repository } f = open(os.path.join(CONFIG_PATH, "config.json"), "w") json.dump(configuration, f) f.close() def create_ssh_key(): """ Create and store SSH key """ create_config_directory() private_key_path = os.path.join(CONFIG_PATH, "ssh-key") if os.path.exists(private_key_path): click.echo("File already exists at {0}".format(private_key_path)) else: verbose_call(["ssh-keygen", "-t", "rsa", "-b", "4096", "-f", private_key_path, "-P", ""]) def build(experiment_path): """ Build Docker image for experiment """ params = load_params(experiment_path) experiment_name = params["experiment_name"] click.echo("Building Docker image for {0}".format(experiment_name)) verbose_call([local_docker_command(params), "build", "-t", experiment_name, experiment_path]) click.echo("Docker build done.") def get_project_path(file_path): cmd = "cd '{0}' && git rev-parse --show-toplevel".format(file_path) echo_command_string(cmd) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) p.wait() return p.stdout.read().strip() ExperimentPathType = click.Path(exists=True, file_okay=False, dir_okay=True, writable=True, readable=True, resolve_path=True) @click.group() def cli(): pass @click.command() @click.pass_context def setup(ctx): """ Run initial interactive setup for Logistician """ click.echo("This is the interactive setup for Logistician.") config() create_ssh_key() click.echo("Configuration done.") @click.command() @click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd()) def sync(experiment_path): """ Sync all data from cloud to local machine """ # Load IP addresses machines_filename = os.path.join(experiment_path, "machines.txt") if not os.path.exists(machines_filename): click.echo("Machine file {0} does not exist. 
Can't sync.".format(machines_filename)) return f = open(machines_filename) machines = [line.strip().split(", ") for line in f.read().strip().split("\n")] f.close() # Load AWS AMI user params = load_params(experiment_path) aws_ami_user = params["aws_ami_user"] # Create data folder if it doesn't exist verbose_call(["mkdir", "-p", os.path.join(experiment_path, "data/")]) docker_command = remote_docker_command(params) for (ip, condition) in machines: remote_address = "{0}@{1}".format(aws_ami_user, ip) local_path = os.path.join(experiment_path, "data/", condition) click.echo("Syncing {0} to {1}".format(remote_address, local_path)) # Copy latest Docker logs to remote data directory verbose_call(["ssh", "-o", "StrictHostKeyChecking no", "-i", "~/.logistician/ssh-key", remote_address, "sudo bash -c '" + docker_command + " logs `" + docker_command + " ps -aq | head -n 1` > /data/logs/docker.txt'"]) # Retrieve remote data directory verbose_call(["rsync", "-azvv", "-e", "ssh -i ~/.logistician/ssh-key", "{0}:/data/".format(remote_address), local_path]) click.echo("Syncing done.") @click.command() @click.option('--options', '-o', help='Options to pass to experiment script', default='') @click.option('--data_readonly', help='Data folder to read from (optional)', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=False, readable=True, resolve_path=True), default=None) @click.option('--clone/--no-clone', help='Clone from remote repo, don\'t use project folder', default=False) @click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd()) def run(experiment_path, clone, options, data_readonly): """ Run experiment locally """ build(experiment_path) params = load_params(experiment_path) experiment_name = params["experiment_name"] click.echo("Running {0} with options '{1}'".format(experiment_name, options)) if clone: # If we don't mount project volume, it will be cloned clone_args = [] else: project_path = get_project_path(experiment_path) clone_args = ["-v", "{0}:/project".format(project_path)] if data_readonly: data_args = ["-v", "{0}:/data:ro".format(data_readonly)] else: data_args = [] cmd = [local_docker_command(params), "run"] + clone_args + data_args + ["-e", "OPTIONS={0}".format(options), "-it", experiment_name] verbose_call(cmd) click.echo("Experiment done.") @click.command() @click.option('--volume/--no-volume', help='Mount project folder as /project volume in Docker', default=True) @click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd()) def shell(experiment_path, volume=True): """ Open shell in experiment environment """ build(experiment_path) params = load_params(experiment_path) experiment_name = params["experiment_name"] docker_command = local_docker_command(params) click.echo("Opening shell for {0}".format(experiment_name)) if volume: project_path = get_project_path(experiment_path) verbose_call([docker_command, "run", "-v", "{0}:/project".format(project_path), "-it", experiment_name, "bash", "-c", "cd /project && bash"]) else: verbose_call([docker_command, "run", "-it", experiment_name, "bash"]) click.echo("Shell exited.") @click.command() @click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd()) def deploy(experiment_path): """ Run experiment in the cloud """ build(experiment_path) click.echo("Deploying {0} to cloud".format(experiment_path)) params_file = os.path.join(experiment_path, "parameters.json") config_file = os.path.join(CONFIG_PATH, "config.json") terraform_aws_path = 
os.path.join(LOGISTICIAN_ROOT, "terraform/aws") verbose_call(["terraform", "apply", '-var-file={0}'.format(params_file), '-var-file={0}'.format(config_file), terraform_aws_path]) click.echo("Deployment done.") @click.command() @click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd()) def status(experiment_path): """ Show deployment status """ verbose_call(["terraform", "show", os.path.join(experiment_path, "terraform.tfstate")]) @click.command() @click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd()) def terminate(experiment_path): """ Terminate cloud experiment """ click.echo("Terminating {0} in cloud".format(experiment_path)) params_file = os.path.join(experiment_path, "parameters.json") config_file = os.path.join(CONFIG_PATH, "config.json") terraform_aws_path = os.path.join(LOGISTICIAN_ROOT, "terraform/aws") verbose_call(["terraform", "destroy", '-var-file={0}'.format(params_file), '-var-file={0}'.format(config_file), terraform_aws_path]) click.echo("Experiment terminated.") @click.command() @click.option('--base', help='Path to previous experiment used as base (optional)', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True, resolve_path=True), default=None) @click.argument('experiment_path', type=click.Path(exists=False), default=lambda: None) def create(experiment_path, base): """ Run interactive setup for a new experiment """ if not experiment_path: experiment_path = click.prompt("Path for new experiment", default=os.path.join(os.getcwd(), random_id())) if os.path.exists(experiment_path): click.echo("Experiment path should not exist") return click.echo("This script will interactively create a new experiment stored at:") click.echo(os.path.abspath(experiment_path) + "\n") # Create folder for new experiment os.makedirs(experiment_path) # Get experiment name dirname = os.path.basename(os.path.dirname(os.path.join(experiment_path, ''))) experiment_name = click.prompt("Globally unique experiment name", default=dirname) if base: create_derived_experiment(experiment_path, experiment_name, base) else: create_fresh_experiment(experiment_path, experiment_name) def create_derived_experiment(experiment_path, experiment_name, base): # Copy over Dockerfile dockerfile_path = os.path.join(experiment_path, "Dockerfile") shutil.copyfile(os.path.join(base, "Dockerfile"), dockerfile_path); # Copy over parameter.json, replacing experiment_name with new one f = open(os.path.join(base, "parameters.json")) params = json.load(f) f.close() params["experiment_name"] = experiment_name parameters_path = os.path.join(experiment_path, "parameters.json") f = open(parameters_path, "w") json.dump(params, f, indent=2, sort_keys=True) f.close() show_experiment_info(experiment_path, dockerfile_path, parameters_path) def create_fresh_experiment(experiment_path, experiment_name): git_remote_url = subprocess.check_output(["git", "config", "--get", "remote.origin.url"]).strip() project_git_url = click.prompt("Remote Git URL", default=git_remote_url) experiment_cmd = click.prompt("Experiment command (relative to project root)") settings = { "experiment_name": experiment_name, "project_git_url": project_git_url, "experiment_cmd": experiment_cmd } # Create Dockerfile dockerfile_template_path = os.path.join(LOGISTICIAN_ROOT, "templates/experiment/Dockerfile") dockerfile_contents = from_template_file(dockerfile_template_path, settings) dockerfile_path = os.path.join(experiment_path, "Dockerfile") write_to_file(dockerfile_path, 
dockerfile_contents) # Create parameters.json parameters_template_path = os.path.join(LOGISTICIAN_ROOT, "templates/experiment/parameters.json") parameters_contents = from_template_file(parameters_template_path, settings) parameters_path = os.path.join(experiment_path, "parameters.json") write_to_file(parameters_path, parameters_contents) show_experiment_info(experiment_path, dockerfile_path, parameters_path) def show_experiment_info(experiment_path, dockerfile_path, parameters_path): # Instruct user to edit Dockerfile click.echo("\nExperiment created.") click.echo("\nYou can now edit the Dockerfile and parameters:") click.echo("Dockerfile: {0}".format(dockerfile_path)) click.echo("Parameters: {0}".format(parameters_path)) click.echo("\nOnce done editing, you can run your experiment:") click.echo("logistician run {0}".format(os.path.relpath(experiment_path))) cli.add_command(create) cli.add_command(deploy) cli.add_command(run) cli.add_command(setup) cli.add_command(shell) cli.add_command(status) cli.add_command(sync) cli.add_command(terminate)
py
1a3abc0a88cdf067c97741a9222f2009ae8bed18
# price_dictionary = {
#     "banana": 1.50,
#     "avocado": 0.99,
#     "heirloom tomato": 0.89,
#     "cherry tomato pack": 3.00
#}

# Welcome to the birthday dictionary. We know the birthdays of:
#     Albert Einstein
#     Benjamin Franklin
#     Ada Lovelace

# >>> Who's birthday do you want to look up?
# Benjamin Franklin
# >>> Benjamin Franklin's birthday is 01/17/1706.

bDayDict = {
    "Albert Einstein": "03/14/1879",
    "Benjamin Franklin": "01/17/1706",
    "Ada Lovelace": "12/10/1815",
}

print("Welcome to the birthday dictionary. We know the birthdays of: \n Albert Einstein \n Benjamin Franklin \n Ada Lovelace \n")

name = input("Who's birthday do you want to look up?")

if name == "Albert Einstein":
    print("Albert Einstein's birthday is " + bDayDict["Albert Einstein"])
elif name == "Benjamin Franklin":
    print("Benjamin Franklin's birthday is " + bDayDict["Benjamin Franklin"])
elif name == "Ada Lovelace":
    print("Ada Lovelace's birthday is " + bDayDict["Ada Lovelace"])
else:
    print("Sorry, that person is not known in this birthday dictionary.")
py
1a3abd1db2d66ecb4f72910f7326fda68766d7c3
""" This script takes a pre-trained Spatial Transformer and applies it to an unaligned dataset to create an aligned and filtered dataset in an unsupervised fashion. By default, this script will only use the similarity transformation portion of the Spatial Transformer (rotation + crop) to avoid introducing warping artifacts. """ import os import sys sys.path.insert(1, os.path.dirname(sys.path[0])) import torch import numpy as np from PIL import Image from tqdm import tqdm from prepare_data import create_dataset, border_pad from models import ComposedSTN from models.spatial_transformers.warping_heads import SimilarityHead from applications import base_eval_argparse, load_stn, determine_flips from applications.flow_scores import filter_dataset from utils.distributed import setup_distributed, primary, get_rank, all_gatherv, synchronize, get_world_size from datasets import MultiResolutionDataset def apply_congealing(args, dataset, stn, stn_full, out_path, device, rank, n_processes, **stn_args): def prepro(x, from_np=False): if from_np: x = np.asarray(x) return torch.from_numpy(x).float().div_(255.0).add_(-0.5).mul_(2.0).permute(2, 0, 1).unsqueeze_(0).to(device) total = 0 prefix = chr(ord('a') + rank) print(f'({rank}) Using prefix {prefix}') pbar = tqdm if rank == 0 else lambda x: x indices = torch.arange(rank, len(dataset), n_processes) one_hot = torch.tensor([[[0, 0, 1]]], dtype=torch.float, device=device) used_indices = [] for i in pbar(indices): with torch.no_grad(): x = dataset[i.item()] # (1, C, H, W) w, h = x.size size = max(w, h) x_big = prepro(border_pad(x, size, resize=False, to_pil=False)) # (1, C, size, size) x_in = prepro(border_pad(x, args.flow_size, to_pil=False)) # (1, C, flow_size, flow_size) x_in, flip_indices, warp_policy = determine_flips(args, stn_full, None, x_in) x_big = torch.where(flip_indices.view(-1, 1, 1, 1), x_big.flip(3,), x_big) image_bounds = torch.tensor([[h, w]], dtype=torch.float, device='cuda') try: aligned, M, oob = stn(x_in, return_flow=True, return_out_of_bounds=True, input_img_for_sampling=x_big, output_resolution=args.output_resolution, image_bounds=image_bounds, **stn_args) except RuntimeError: print(f'Rank {rank}: WARNING: Ran out of GPU memory, skipping...') continue # The scale of the similarity transform can be extracted from our affine matrix # by taking the square-root of its determinant: M = torch.cat([M, one_hot], 1) scale = torch.det(M).sqrt_() too_low_res = (scale.item() * min(w, h)) < args.min_effective_resolution # We don't want to include images that can only be aligned by extrapolating a significant number of pixels # beyond the image boundary: if not (too_low_res or oob.item()): used_indices.append(i) write_image_batch(aligned, out_path, start_index=total, prefix=prefix) total += aligned.size(0) print(f'({rank}) Saved {total} images') used_indices = torch.stack(used_indices).to(device) return used_indices def write_image_batch(images, out_path, start_index=0, prefix=''): def norm(img, min, max): img.clamp_(min=min, max=max) img.add_(-min).div_(max - min) norm(images, -1, 1) ndarr = images.mul(255).add_(0.5).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy() for i in range(ndarr.shape[0]): index = i + start_index Image.fromarray(ndarr[i]).save(f'{out_path}/{prefix}{index:07}.png') def align_and_filter_dataset(args, t): # The aligned + filtered images will be saved directly as pngs to temp_folder below: temp_folder = f'{args.out}_imagefolder' if primary(): os.makedirs(temp_folder, exist_ok=True) os.makedirs(args.out, 
exist_ok=True) # Step 1: Apply the STN to every image in the dataset dataset = MultiResolutionDataset(args.real_data_path, resolution=args.real_size, transform=None) if args.flow_scores is not None: # Filter the dataset based on flow scores: dataset = filter_dataset(dataset, args.flow_scores, args.fraction_retained) if isinstance(t, ComposedSTN): t_sim = t.stns[0] # Only use the similarity transformation else: t_sim = t assert isinstance(t_sim.warp_head, SimilarityHead), 'Currently only similarity transformations are supported ' \ 'for this script' used_indices = apply_congealing(args, dataset, t_sim, t, temp_folder, 'cuda', get_rank(), get_world_size(), iters=args.iters, padding_mode=args.padding_mode) synchronize() used_indices = all_gatherv(used_indices) # Step 2: Create an lmdb from temp_folder: if primary(): create_dataset(args.out, temp_folder, size=args.output_resolution, format='png') used_indices = used_indices.sort().values.cpu() print(f'Saving indices of images (size={used_indices.size(0)})') torch.save(used_indices, f'{args.out}/dataset_indices.pt') print('Done.') if __name__ == '__main__': parser = base_eval_argparse() # Dataset congealing + creation hyperparameters: parser.add_argument("--out", type=str, help='Directory to save output aligned dataset', required=True) parser.add_argument("--output_resolution", type=int, default=256, help='Resolution of output aligned images') parser.add_argument("--flow_scores", default=None, type=str, help='Path to pre-computed flow scores to filter dataset (see flow_scores.py for more info)') parser.add_argument("--fraction_retained", default=1.0, type=float, help='Fraction of dataset images to retain based on flow scores') # Also see --fraction_retained in base_eval_argparse() parser.add_argument("--min_effective_resolution", type=int, default=192, help='Some images will have small objects that the STN successfully aligns. But, you may not ' 'want these aligned images in your dataset because the STN will have produced a large ' 'zoom that yields a low resolution image when resized to output_resolution. Any aligned ' 'image with size less than min_effective_resolution will be excluded from the output ' 'dataset.') args = parser.parse_args() assert args.num_heads == 1, 'Clustering not currently supported for congeal_dataset.py' args.distributed = setup_distributed(args.local_rank) t_ema = load_stn(args) align_and_filter_dataset(args, t_ema)
py
1a3abd5f919ef8a991067ccd7db7af538ab75436
import json
import enum
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, EmailStr, Field, validator

from app.models.task import TaskState, TaskType
from app.schemas.common import (
    Common,
    DateTimeModelMixin,
    IdModelMixin,
    IsDeletedModelMixin,
)


class MergeStrategy(enum.IntEnum):
    stop_upon_conflict = 1
    prefer_newest = 2
    prefer_oldest = 3


class TaskBase(BaseModel):
    name: str
    type: TaskType


class TaskParameter(BaseModel):
    include_datasets: Optional[List[int]]
    include_train_datasets: Optional[List[int]]
    include_validation_datasets: Optional[List[int]]
    include_test_datasets: Optional[List[int]]
    exclude_datasets: Optional[List[int]]

    include_classes: Optional[List[str]]
    exclude_classes: Optional[List[str]]

    # strategy
    strategy: Optional[MergeStrategy] = Field(
        MergeStrategy.prefer_newest, description="strategy to merge multiple datasets"
    )

    # label
    extra_url: Optional[str]
    labellers: Optional[List[EmailStr]]

    # training
    network: Optional[str]
    backbone: Optional[str]
    hyperparameter: Optional[str]

    # mining
    model_id: Optional[int]
    mining_algorithm: Optional[str]
    top_k: Optional[int]
    generate_annotations: Optional[bool]


class TaskCreate(TaskBase):
    parameters: Optional[TaskParameter] = Field(
        None, description="task specific parameters"
    )
    config: Optional[Union[str, Dict]] = Field(
        None, description="docker runtime configuration"
    )

    @validator("config")
    def dumps_config(
        cls, v: Optional[Union[str, Dict]], values: Dict[str, Any]
    ) -> Optional[str]:
        if isinstance(v, dict):
            return json.dumps(v)
        else:
            return v


class TaskUpdate(BaseModel):
    name: str


class TaskInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, TaskBase):
    hash: str
    state: Optional[TaskState] = TaskState.pending
    progress: Optional[float] = Field(0, description="from 0 to 100")
    parameters: Optional[str] = Field(
        description="json dumped input parameters when creating task"
    )
    config: Optional[str] = Field(
        description="json dumped docker runtime configuration"
    )
    user_id: int = Field(description="task owner's user_id")

    class Config:
        orm_mode = True


class TaskResult(BaseModel):
    dataset_id: Optional[int]
    model_id: Optional[int]
    error: Optional[Dict]


class Task(TaskInDBBase):
    parameters: Optional[str]
    result: Optional[TaskResult]
    config: Optional[str]

    @validator("parameters")
    def loads_parameters(cls, v: str, values: Dict[str, Any]) -> Dict[str, Any]:
        if not v:
            return {}
        return json.loads(v)

    @validator("config")
    def loads_config(cls, v: str, values: Dict[str, Any]) -> Dict[str, Any]:
        if not v:
            return {}
        return json.loads(v)


class Tasks(BaseModel):
    total: int
    items: List[Task]


class TaskOut(Common):
    result: Union[Task, Tasks]
py
1a3ac19c48cba3d33ecb624be796a9534b867764
from glob import glob

import pandas as pd
import process_trial
import json, os
from tqdm import tqdm

print(os.getcwd())
print('Process testset')

for test_group in tqdm(glob('data/raw/*/metrics_*.csv')):
    try:
        platform = test_group.split('/')[2]
        df = pd.read_csv(test_group)
        df['init_time'] = pd.to_datetime(df['init_time'])
        df.reset_index(inplace=True)
        for index, row in df.iterrows():
            row['times'] = json.loads(row['times'])
            hw_log = test_group.replace('metrics_', 'hardware_log_')
            process_trial.compute(row, hw_log, index, platform)
    except:
        print(f'ERROR: {test_group}')

print('Process testset completed')
py
1a3ac1e7d1eef4878fb6c14bd6f881978ed5ec09
for _ in range(int(input())):
    x = int(input())
    print(0) if x % 2 == 0 else print(1)
py
1a3ac2da4c043f5a5379abfa7e3e2bf9b7a2cb84
#!/usr/bin/env python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

import argparse
import os
import platform
import shutil
import subprocess
import sys
import time

from vmstat import capture_sample
from vmstat import plot_output
from vmstat import print_output_to_file


def main():
    args = parse_args()

    ret = os.system(f"cd ../examples && b2 release {args.toolset} stage_client_test stage_connection_tester")
    save_dir = args.directory
    print(f"save dir: {save_dir}")
    if ret != 0:
        print('ERROR: build failed: %d' % ret)
        sys.exit(1)

    rm_file_or_dir(".ses_state")
    rm_file_or_dir(".resume")

    if not os.path.exists('checking_benchmark.torrent'):
        ret = os.system('../examples/connection_tester gen-torrent -s 10000 -n 15 -t checking_benchmark.torrent')
        if ret != 0:
            print('ERROR: connection_tester failed: %d' % ret)
            sys.exit(1)

    if not os.path.exists(f"{save_dir}/checking_benchmark"):
        cmd_line = f'../examples/connection_tester gen-data -t checking_benchmark.torrent -P {save_dir}'
        print(cmd_line)
        ret = os.system(cmd_line)
        if ret != 0:
            print('ERROR: connection_tester failed: %d' % ret)
            sys.exit(1)

    for threads in [1, 2, 4, 8, 16, 32, 64]:
        print("drop caches now. e.g. \"echo 1 | sudo tee /proc/sys/vm/drop_caches\"")
        input("Press Enter to continue...")
        run_test(f"{threads}", f"--hashing_threads={threads}", save_dir)


def run_test(name, client_arg, save_dir: str):
    output_dir = 'logs_checking_%s' % name

    timing_path = os.path.join(output_dir, 'timing.txt')
    if os.path.exists(timing_path):
        print('file "{path}" exists, skipping test "{name}"'.format(path=timing_path, name=name))
        return

    rm_file_or_dir(output_dir)
    try:
        os.mkdir(output_dir)
    except Exception:
        pass
    rm_file_or_dir(f"{save_dir}/.resume")

    client_cmd = ('../examples/client_test checking_benchmark.torrent '
                  '--enable_dht=0 --enable_lsd=0 --enable_upnp=0 --enable_natpmp=0 '
                  f'-1 {client_arg} -s {save_dir} -f {output_dir}/events.log --alert_mask=all')

    client_out = open('%s/client.out' % output_dir, 'w+')
    print('client_cmd: "{cmd}"'.format(cmd=client_cmd))
    c = subprocess.Popen(client_cmd.split(' '), stdout=client_out, stderr=client_out, stdin=subprocess.PIPE)

    start = time.monotonic()
    if platform.system() == "Linux":
        out = {}
        while c.returncode is None:
            capture_sample(c.pid, start, out)
            time.sleep(0.1)
            c.poll()
        stats_filename = f"{output_dir}/memory_stats.log"
        keys = print_output_to_file(out, stats_filename)
        plot_output(stats_filename, keys)
    else:
        c.wait()
    client_out.close()

    start_time = 0
    end_time = 0
    for l in open('%s/events.log' % output_dir, 'r'):
        if 'checking_benchmark: start_checking, m_checking_piece: ' in l \
                and start_time == 0:
            start_time = int(l.split(' ')[0][1:-1])
        if 'state changed to: finished' in l \
                and start_time != 0:
            end_time = int(l.split(' ')[0][1:-1])

    print('%s: %d' % (name, end_time - start_time))
    with open('%s/timing.txt' % output_dir, 'w+') as f:
        f.write('%s: %d\n' % (name, end_time - start_time))


def rm_file_or_dir(path):
    """ Attempt to remove file or directory at path """
    try:
        shutil.rmtree(path)
    except Exception:
        pass

    try:
        os.remove(path)
    except Exception:
        pass


def parse_args():
    p = argparse.ArgumentParser()
    p.add_argument('--toolset', default="")
    p.add_argument('--directory', default=".")
    return p.parse_args()


if __name__ == '__main__':
    main()
py
1a3ac3b350d94c57e4badde2f0ee0ec3daec7b37
from abc import ABCMeta
from collections import OrderedDict

import yaml
from yaml.representer import Representer

# Support for abstract classes
Representer.add_representer(ABCMeta, Representer.represent_name)


class Config:
    """
    Configuration object used for initializing an Agent.
    It maintains the order from which the attributes have been set.

    Parameters
    ----------
    configs: Keyword arguments
        Additional parameters that will be stored.

    Returns
    -------
    Config object
        An object containing all configuration details (with possibly nested Config).
    """

    def __init__(self, *args, **kwargs):
        # We want to maintain the order of the attributes,
        # this is especially necessary when defining NNs architectures
        self.__dict__["_attrs"] = OrderedDict()

        for i, value in enumerate(args):
            self._nested_loader("attr{}".format(i), value)
        for key, value in kwargs.items():
            self._nested_loader(key, value)

    def __getattr__(self, value):
        try:
            return self.__dict__["_attrs"][value]
        except:
            raise AttributeError(value)

    def __setattr__(self, key, value):
        self.__dict__["_attrs"][key] = value

    def __repr__(self):
        return yaml.dump(self.as_dict(), default_flow_style=False)

    def __iter__(self):
        yield from self.as_dict()

    def _nested_loader(self, key, value):
        if isinstance(value, OrderedDict):
            return self.new_section(key, **value)
        else:
            setattr(self, key, value)

    def items(self):
        return self.as_dict().items()

    def values(self):
        return self.as_dict().values()

    def pop(self, *args, **kwargs):
        return self.as_dict().pop(*args, **kwargs)

    def get(self, *args, **kwargs):
        return self.as_dict().get(*args, **kwargs)

    def update(self, config):
        self.as_dict().update(config.as_dict())

    def as_dict(self):
        """
        Returns all object attributes as a nested OrderedDict.

        Returns
        -------
        dict
            Nested OrderedDict containing all object attributes.
        """
        return self.__dict__["_attrs"]

    def as_list(self):
        return list(self.as_dict().values())

    def new_section(self, name, **configs):
        """
        Creates a new Config object and add as an attribute of this instance.

        Parameters
        ----------
        name: str
            Name of the new section.
        configs: Keyword arguments
            Parameters that will be stored in this section, accepts nested parameters.

        Examples
        --------
        Simple use case::

            config.new_section('new_section_name', attr1=value1, attr2=value2, ...)

        Nested parameters::

            config.new_section('new_section_name', attr1=Config(attr1=value1, attr2=value2))

        It's possible to access the variable like so::

            config.new_section_name.attr1
        """
        self._nested_loader(name, Config(**configs))

    def save(self, file_path):
        """
        Saves current configuration to a JSON file.
        The configuration is stored as a nested dictionary (maintaining the order).

        Parameters
        ----------
        file_path: str
            Path to write the file
        """
        with open(file_path + ".yaml", "w") as f:
            yaml.dump(self, f, default_flow_style=False)

    @classmethod
    def from_default(cls, name):
        """
        Loads configuration from a default agent.

        Parameters
        ----------
        name: str
            Name of the desired config file ('VanillaPG', add_more)

        Returns
        -------
        Config
            A configuration object loaded from a JSON file
        """
        if name == "PPO":
            return cls.load("CHANGE")

    @staticmethod
    def load(file_path):
        """
        Loads configuration from a JSON file.

        Parameters
        ----------
        file_path: str
            Path of the file to be loaded.

        Returns
        -------
        Config
            A configuration object loaded from a JSON file
        """
        with open(file_path + ".yaml", "r") as f:
            return yaml.load(f)
py
1a3ac40ce87c019e34e535d1df811f499038a6da
import json from contextlib import contextmanager from datetime import datetime, timedelta from xml.sax.saxutils import unescape from mock import patch from casexml.apps.case.models import CommCareCase from casexml.apps.case.sharedmodels import CommCareCaseIndex from corehq.apps.domain.shortcuts import create_domain from corehq.apps.tzmigration.timezonemigration import MISSING from corehq.form_processor.backends.couch.dbaccessors import CaseAccessorCouch from corehq.form_processor.interfaces.dbaccessors import FormAccessors from corehq.form_processor.models import CommCareCaseIndexSQL from corehq.form_processor.utils.general import ( clear_local_domain_sql_backend_override, ) from corehq.util.dates import iso_string_to_datetime from corehq.util.test_utils import capture_log_output from .test_migration import BaseMigrationTestCase, Diff, IGNORE, make_test_form from .. import casediff from .. import casedifftool as mod from ..diffrule import ANY from ..statedb import open_state_db class TestCouchSqlDiff(BaseMigrationTestCase): def test_diff(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") clear_local_domain_sql_backend_override(self.domain_name) with self.augmented_couch_case("case-1") as case: case.age = '35' case.save() self.do_case_diffs() self.compare_diffs([ Diff('case-1', 'diff', ['age'], old='35', new='27'), ]) self.do_migration(forms="missing", case_diff="patch") def test_diff_specific_case(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") clear_local_domain_sql_backend_override(self.domain_name) with self.augmented_couch_case("case-1") as case: case.age = '35' case.save() self.do_case_diffs(cases="case-1") self.compare_diffs([ Diff('case-1', 'diff', ['age'], old='35', new='27'), ]) def test_pending_diff(self): def diff_none(case_ids, log_cases=None): return casediff.DiffData([]) self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff='none') clear_local_domain_sql_backend_override(self.domain_name) with self.augmented_couch_case("case-1") as case: case.age = '35' case.save() with patch("corehq.apps.couch_sql_migration.casedifftool.diff_cases", diff_none): result = self.do_case_diffs() self.assertEqual(result, mod.PENDING_WARNING) self.do_case_diffs(cases="pending") self.compare_diffs([ Diff('case-1', 'diff', ['age'], old='35', new='27'), ]) def test_live_diff(self): # do not diff case modified since most recent case created in SQL self.submit_form(make_test_form("form-1", case_id="case-1"), timedelta(minutes=-90)) self.submit_form(make_test_form("form-2", case_id="case-1", age=35)) self.do_migration(live=True, chunk_size=1, case_diff="none") self.assert_backend("sql") case = self._get_case("case-1") self.assertEqual(case.dynamic_case_properties()["age"], '27') self.do_case_diffs() self.compare_diffs(ignore_fail=True) def test_failed_diff(self): self.pool_mock.stop() self.addCleanup(self.pool_mock.start) self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") # patch init_worker to make subprocesses use the same database # connections as this process (which is operating in a transaction) init_worker_path = "corehq.apps.couch_sql_migration.casedifftool.init_worker" with patch(init_worker_path, mod.global_diff_state), \ patch("corehq.apps.couch_sql_migration.casediff.diff_case") as mock, \ capture_log_output("corehq.apps.couch_sql_migration.parallel") as log: mock.side_effect = Exception("diff failed!") 
self.do_case_diffs() logs = log.get_output() self.assertIn("error processing item in worker", logs) self.assertIn("Exception: diff failed!", logs) self.compare_diffs() db = open_state_db(self.domain_name, self.state_dir) self.assertEqual(list(db.iter_undiffed_case_ids()), ["case-1"]) def test_reconcile_transaction_order(self): from ..rebuildcase import SortTransactionsRebuild form1 = make_test_form("form-1", age="33", date="2016-08-04T18:25:56.656Z") form2 = make_test_form("form-2", age="32", date="2015-08-04T18:25:56.656Z") self.submit_form(form1) self.submit_form(form2) self.assertEqual(self._get_case("test-case").age, "33") with self.diff_without_rebuild(): self.do_migration() self.compare_diffs([ Diff('test-case', 'diff', ['age'], old='33', new='32'), ]) clear_local_domain_sql_backend_override(self.domain_name) self.do_case_diffs(cases="with-diffs") sql_case = self._get_case("test-case") self.assertEqual(sql_case.dynamic_case_properties()["age"], "33") self.compare_diffs() details = sql_case.transactions[-1].details self.assertEqual(details["reason"], SortTransactionsRebuild._REASON) server_dates = details["original_server_dates"] self.assertEqual(len(server_dates), 1, server_dates) def test_couch_with_missing_forms(self): form1 = make_test_form("form-1", age="33", date="2016-08-04T18:25:56.656Z") form2 = make_test_form("form-2", age="32", date="2015-08-04T18:25:56.656Z") self.submit_form(THING_FORM) self.submit_form(form1) self.submit_form(form2) case = self._get_case("test-case") self.assertEqual(case.age, "33") self.assertEqual(case.thing, "1") del case.thing case.actions = [a for a in case.actions if a.form_id != "thing-form"] case.save() with self.assertRaises(AttributeError): self._get_case("test-case").thing with self.diff_without_rebuild(): self.do_migration() self.compare_diffs([ Diff('test-case', 'diff', ['age'], old='33', new='32'), Diff('test-case', 'missing', ['thing'], old=MISSING, new='1'), ]) clear_local_domain_sql_backend_override(self.domain_name) self.do_case_diffs(cases="with-diffs") sql_case = self._get_case("test-case") self.assertEqual(sql_case.dynamic_case_properties()["age"], "33") self.compare_diffs(changes=[ Diff('test-case', 'missing', ['thing'], old=MISSING, new='1', reason='rebuild case'), ]) self.do_migration(patch=True, diffs=[]) def test_couch_missing_create_case(self): with self.skip_case_and_ledger_updates("thing-form"): self.submit_form(THING_FORM) self.submit_form(UPDATE_FORM) case = self._get_case("test-case") # simulate null properties seen in the wild object.__setattr__(case, "name", None) object.__setattr__(case, "type", None) case.save() with self.diff_without_rebuild(): self.do_migration() self.compare_diffs([ Diff('test-case', 'missing', ['thing'], old=MISSING, new='1'), Diff('test-case', 'set_mismatch', ['xform_ids', '[*]'], old='', new='thing-form'), Diff('test-case', 'type', ['name'], old=None, new='Thing'), Diff('test-case', 'type', ['type'], old=None, new='testing'), ]) self.do_migration(patch=True, diffs=[]) case = self._get_case("test-case") self.assertEqual(case.name, "") self.assertEqual(case.type, "") self.assertEqual(case.dynamic_case_properties()["thing"], "") self.assertEqual(case.xform_ids, ['thing-form', 'update-form', ANY]) def test_case_with_deleted_form(self): # form state=normal / deleted -> missing case one = self.submit_form(make_test_form("one", age=27)) FormAccessors(self.domain_name).soft_delete_forms( [one.form_id], datetime.utcnow(), 'test-deletion') self.do_migration() self.compare_diffs(changes=[ Diff('test-case', 
'missing', ['*'], old='*', new=MISSING, reason="deleted forms"), ]) def test_diff_case_with_wrong_domain(self): wrong_domain = create_domain("wrong") self.addCleanup(wrong_domain.delete) self.submit_form(make_test_form("form-1"), domain="wrong") self.do_migration(case_diff="none", domain="wrong") self.do_migration(case_diff="none") clear_local_domain_sql_backend_override(self.domain_name) with capture_log_output("corehq.apps.couch_sql_migration") as log, \ self.augmented_couch_case("test-case") as case: # modify case so it would have a diff (if it were diffed) case.age = '35' case.save() # try to diff case in wrong domain self.do_case_diffs(cases="test-case") self.compare_diffs([ Diff('test-case', 'diff', ['domain'], old='wrong', new=self.domain_name), ]) logs = log.get_output() self.assertIn("couch case test-case has wrong domain: wrong", logs) def test_ledger_dup_transaction_diff(self): product_id = self.create_form_with_duplicate_stock_transaction() self.do_migration(case_diff='none') self.compare_diffs(ignore_fail=True) clear_local_domain_sql_backend_override(self.domain_name) self.do_case_diffs() self.compare_diffs(changes=[Diff( f"test-case/things/{product_id}", reason="duplicate stock transaction", type="diff", path=["balance"], old=2, new=1, kind="stock state", )]) def test_patch_known_properties(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") clear_local_domain_sql_backend_override(self.domain_name) open_date = datetime(2010, 9, 8) with self.augmented_couch_case("case-1") as case: case.name = "Zena" case.type = "old-type" case.user_id = "old-user" case.owner_id = "old-owner" case.opened_on = open_date case.save() self.do_case_diffs() self.compare_diffs([ Diff('case-1', 'diff', ['name'], old='Zena', new='Xeenax'), Diff('case-1', 'diff', ['owner_id'], old='old-owner', new='3fae4ea4af440efaa53441b5'), Diff('case-1', 'diff', ['type'], old='old-type', new='testing'), Diff('case-1', 'diff', ['user_id'], old='old-user', new='3fae4ea4af440efaa53441b5'), ]) self.do_migration(forms="missing", case_diff="patch") self.assertEqual(self._get_case("case-1").opened_on, open_date) def test_unpatchable_properties(self): date1 = "2018-07-13T11:20:11.381000Z" self.submit_form(make_test_form("form-1", case_id="case-1")) case = self._get_case("case-1") user = case.user_id case.closed = True case.closed_by = "someone" case.closed_on = iso_string_to_datetime(date1) case.external_id = "ext" case.name = "Zena" case.opened_by = "someone" case.server_modified_on = iso_string_to_datetime(date1) case.user_id = "person" case.save() self.do_migration(diffs=[ Diff('case-1', 'diff', ['closed'], old=True, new=False), Diff('case-1', 'diff', ['closed_by'], old='someone', new=''), Diff('case-1', 'diff', ['external_id'], old='ext', new=''), Diff('case-1', 'diff', ['name'], old='Zena', new='Xeenax'), Diff('case-1', 'diff', ['opened_by'], old='someone', new=user), Diff('case-1', 'diff', ['user_id'], old='person', new=user), Diff('case-1', 'type', ['closed_on'], old=date1, new=None), ]) self.do_migration(patch=True, diffs=[]) close2 = iso_string_to_datetime("2015-08-04T18:25:56.656Z") case = self._get_case("case-1") self.assertEqual(case.closed, True) # patched self.assertEqual(case.closed_by, "person") # unpatched self.assertEqual(case.closed_on, close2) # unpatched self.assertEqual(case.external_id, 'ext') # patched, not sure how/why self.assertEqual(case.name, "Zena") # patched self.assertEqual(case.opened_by, user) # unpatched self.assertEqual(case.user_id, 
"person") # patched self.assertNotEqual(case.server_modified_on, iso_string_to_datetime(date1)) # unpatched form = self._get_form(case.xform_ids[-1]) diffs = json.loads(unescape(form.form_data["diff"])) self.assertEqual(diffs, { "case_id": "case-1", "diffs": [ {"path": ["closed"], "old": True, "new": False, "patch": True}, {"path": ["closed_by"], "old": "someone", "new": "", "patch": False}, {"path": ["closed_on"], "old": date1, "new": None, "patch": False}, {"path": ["external_id"], "old": "ext", "new": "", "patch": False}, {"path": ["name"], "old": "Zena", "new": "Xeenax", "patch": True}, {"path": ["opened_by"], "old": "someone", "new": user, "patch": False}, {"path": ["user_id"], "old": "person", "new": user, "patch": True}, ], }) def test_patch_closed_case(self): from casexml.apps.case.cleanup import close_case self.submit_form(make_test_form("form-1", case_id="case-1")) close_case("case-1", self.domain_name, "system", "test") self.do_migration(case_diff="none") clear_local_domain_sql_backend_override(self.domain_name) with self.augmented_couch_case("case-1") as case: case.name = "Zena" case.save() self.do_case_diffs() self.compare_diffs([ Diff('case-1', 'diff', ['name'], old='Zena', new='Xeenax'), ]) self.do_migration(forms="missing", case_diff="patch") self.assertEqual(self._get_case("case-1").closed, True) self.assert_patched_cases(["case-1"]) def test_patch_case_needing_sql_rebuild(self): with self.skip_case_and_ledger_updates("form-1"): self.submit_form(make_test_form("form-1", age=30)) self.submit_form(make_test_form("form-2")) with self.diff_without_rebuild(): self.do_migration() with patch.object(mod.CaseDiffTool, "diff_cases"): self.do_case_patch() self.compare_diffs([ Diff('test-case', 'set_mismatch', ['xform_ids', '[*]'], old='', new='form-1'), ]) case = self._get_case("test-case") case.case_json["age"] = "30" # diff -> reubild SQL case case.save() self.do_case_diffs("pending") self.compare_diffs([]) self.assert_patched_cases(["test-case"]) def test_cannot_patch_case_missing_in_couch(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") CommCareCase.get_db().delete_doc("case-1") self.do_migration(forms="missing", case_diff="patch", diffs=[ Diff('case-1', 'missing', ['*'], old=MISSING, new='present'), ]) self.assert_patched_cases() def test_convert_error_form_for_case_missing_in_couch(self): def find_forms(case_id): return ["form-1"] self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") CommCareCase.get_db().delete_doc("case-1") clear_local_domain_sql_backend_override(self.domain_name) form = self._get_form("form-1") form.problem = "something went wrong" form.save() self.do_case_diffs("pending") self.compare_diffs([ Diff('case-1', 'missing', ['*'], old=MISSING, new='present'), ]) with patch.object(casediff, "find_form_ids_updating_case", find_forms): self.do_migration(forms="missing", diffs=[]) def test_patch_case_closed_in_couch_not_sql(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") with self.augmented_couch_case("case-1") as case: case.closed = True case.closed_by = "system" case.closed_on = datetime(2010, 9, 8, 7, 6, 5) case.user_id = "system" case.save() self.do_case_diffs() self.compare_diffs([ Diff('case-1', 'diff', ['closed'], old=True, new=False), Diff('case-1', 'diff', ['user_id'], old='system', new='3fae4ea4af440efaa53441b5'), Diff('case-1', 'type', ['closed_by'], old='system', new=None), Diff('case-1', 'type', 
['closed_on'], old='2010-09-08T07:06:05.000000Z', new=None), ]) self.do_case_patch() self.compare_diffs() self.assert_patched_cases(["case-1"]) def test_patch_case_index(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") index = { "doc_type": "CommCareCaseIndex", "identifier": "parent", "referenced_type": "household", "referenced_id": "a53346d5", "relationship": "child", } with self.augmented_couch_case("case-1") as case: case.indices = [CommCareCaseIndex.wrap(index)] case.save() self.do_case_diffs() self.compare_diffs([ Diff('case-1', 'missing', ['indices', '[*]'], old=index, new=MISSING), ]) self.do_case_patch() self.compare_diffs() self.assert_patched_cases(["case-1"]) def test_patch_missing_case_index(self): self.submit_form(make_test_form("form-1", case_id="case-1")) self.do_migration(case_diff="none") CommCareCaseIndexSQL( domain=self.domain_name, case_id="case-1", identifier="parent", referenced_id="a53346d5", referenced_type="household", relationship_id=CommCareCaseIndexSQL.CHILD, ).save() with self.diff_without_rebuild(): self.do_case_diffs() index = { "case_id": "case-1", "identifier": "parent", "referenced_id": "a53346d5", "referenced_type": "household", "relationship": "child", } self.compare_diffs([ Diff('case-1', 'missing', ['indices', '[*]'], old=MISSING, new=index), ]) with self.diff_without_rebuild(): self.do_case_patch() self.compare_diffs() self.assert_patched_cases(["case-1"]) def test_patch_missing_case_with_index(self): self.submit_form(make_test_form("form-1", case_id="case-1")) case = CaseAccessorCouch.get_case("case-1") case.indices = [CommCareCaseIndex.wrap({ "doc_type": "CommCareCaseIndex", "identifier": "parent", "referenced_type": "household", "referenced_id": "a53346d5", "relationship": "child", })] case.save() FormAccessors(self.domain_name).soft_delete_forms( ['form-1'], datetime.utcnow(), 'test-deletion') self.do_migration(diffs=IGNORE) self.compare_diffs(changes=[ Diff('case-1', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"), ]) self.do_case_patch() self.compare_diffs() self.assert_patched_cases(["case-1"]) def test_patch_cases_with_diffs(self): self.do_migration_with_diffs_and_changes() self.do_case_patch(cases="with-diffs") self.assert_patched_cases(["diff-case"]) self.compare_diffs(changes=[ Diff('change-case', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"), ]) def test_patch_cases_with_changes(self): self.do_migration_with_diffs_and_changes() self.do_case_patch(cases="with-changes") self.assert_patched_cases(["change-case"]) self.compare_diffs([ Diff('diff-case', 'diff', ['age'], old='30', new='27'), Diff('diff-case', 'set_mismatch', ['xform_ids', '[*]'], old='one', new=''), ]) def do_migration_with_diffs_and_changes(self): self.submit_form(make_test_form("zero", case_id="diff-case", age=27)) one = self.submit_form(make_test_form("one", case_id="diff-case", age=30)) one.initial_processing_complete = False one.save() two = self.submit_form(make_test_form("two", case_id="change-case", age=27)) FormAccessors(self.domain_name).soft_delete_forms( [two.form_id], datetime.utcnow(), 'test-deletion') self.do_migration(diffs=IGNORE) self.compare_diffs(diffs=[ Diff('diff-case', 'diff', ['age'], old='30', new='27'), Diff('diff-case', 'set_mismatch', ['xform_ids', '[*]'], old='one', new=''), ], changes=[ Diff('change-case', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"), ]) def create_form_with_duplicate_stock_transaction(self): from corehq.apps.commtrack.helpers 
import make_product from corehq.apps.commtrack.processing import process_stock thing1 = make_product(self.domain_name, 'thing-1', 'thing-1') self.submit_form(LEDGER_FORM.replace("thing-1", thing1._id)) stock_result = process_stock([self._get_form("ledger-form")]) stock_result.populate_models() for model in stock_result.models_to_save: model.save() return thing1._id def do_migration(self, *args, **kw): if kw.get("case_diff") != "patch": kw.setdefault("diffs", IGNORE) return super().do_migration(*args, **kw) def do_case_diffs(self, cases=None, stop=False): self.migration_success = True # clear migration failure on diff cases migrator = mod.get_migrator(self.domain_name, self.state_dir) return mod.do_case_diffs(migrator, cases, stop=stop, batch_size=100) def do_case_patch(self, cases=None, stop=False): self.migration_success = True # clear migration failure on diff cases migrator = mod.get_migrator(self.domain_name, self.state_dir) return mod.do_case_patch(migrator, cases, stop=stop, batch_size=100) @contextmanager def augmented_couch_case(self, case_id): case = CaseAccessorCouch.get_case(case_id) with self.diff_without_rebuild(): yield case def assert_patched_cases(self, case_ids=None): statedb = open_state_db(self.domain_name, self.state_dir) self.assertEqual(list(statedb.iter_patched_case_ids()), case_ids or []) self.assertFalse(list(statedb.iter_undiffed_case_ids())) THING_FORM = """ <?xml version="1.0" ?> <data name="Thing" uiVersion="1" version="11" xmlns="http://openrosa.org/formdesigner/thing-form" xmlns:jrm="http://dev.commcarehq.org/jr/xforms" > <thing>1</thing> <n0:case case_id="test-case" date_modified="2014-08-04T18:25:56.656Z" user_id="a362027f228d" xmlns:n0="http://commcarehq.org/case/transaction/v2" > <n0:create> <n0:case_name>Thing</n0:case_name> <n0:owner_id>a362027f228d</n0:owner_id> <n0:case_type>testing</n0:case_type> </n0:create> <n0:update> <n0:thing>1</n0:thing> </n0:update> </n0:case> <n1:meta xmlns:n1="http://openrosa.org/jr/xforms"> <n1:deviceID>cloudcare</n1:deviceID> <n1:timeStart>2014-07-13T11:20:11.381Z</n1:timeStart> <n1:timeEnd>2014-08-04T18:25:56.656Z</n1:timeEnd> <n1:username>thing</n1:username> <n1:userID>a362027f228d</n1:userID> <n1:instanceID>thing-form</n1:instanceID> <n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion> </n1:meta> </data> """.strip() UPDATE_FORM = """ <?xml version="1.0" ?> <data name="Update" uiVersion="1" version="11" xmlns="http://openrosa.org/formdesigner/update-form" xmlns:jrm="http://dev.commcarehq.org/jr/xforms" > <age>27</age> <n0:case case_id="test-case" date_modified="2015-08-04T18:25:56.656Z" user_id="3fae4ea4af440efaa53441b5" xmlns:n0="http://commcarehq.org/case/transaction/v2" > <n0:update> <n0:age>27</n0:age> </n0:update> </n0:case> <n1:meta xmlns:n1="http://openrosa.org/jr/xforms"> <n1:deviceID>cloudcare</n1:deviceID> <n1:timeStart>2015-07-13T11:20:11.381Z</n1:timeStart> <n1:timeEnd>2015-08-04T18:25:56.656Z</n1:timeEnd> <n1:username>jeremy</n1:username> <n1:userID>3fae4ea4af440efaa53441b5</n1:userID> <n1:instanceID>update-form</n1:instanceID> <n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion> </n1:meta> </data> """.strip() LEDGER_FORM = """ <?xml version="1.0" ?> <data name="Ledger" uiVersion="1" version="11" xmlns="http://openrosa.org/formdesigner/ledger-form" xmlns:jrm="http://dev.commcarehq.org/jr/xforms" > <thing>1</thing> <n2:transfer date="2014-08-04" dest="test-case" section-id="things" type="write_things_to_ledger" xmlns:n2="http://commcarehq.org/ledger/v1" > <n2:entry 
id="thing-1" quantity="1"/> </n2:transfer> <n0:case case_id="test-case" date_modified="2014-08-04T18:25:56.656Z" user_id="a362027f228d" xmlns:n0="http://commcarehq.org/case/transaction/v2" > <n0:create> <n0:case_name>Ledger</n0:case_name> <n0:owner_id>a362027f228d</n0:owner_id> <n0:case_type>testing</n0:case_type> </n0:create> <n0:update> <n0:thing>1</n0:thing> </n0:update> </n0:case> <n1:meta xmlns:n1="http://openrosa.org/jr/xforms"> <n1:deviceID>cloudcare</n1:deviceID> <n1:timeStart>2014-07-13T11:20:11.381Z</n1:timeStart> <n1:timeEnd>2014-08-04T18:25:56.656Z</n1:timeEnd> <n1:username>thing</n1:username> <n1:userID>a362027f228d</n1:userID> <n1:instanceID>ledger-form</n1:instanceID> <n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion> </n1:meta> </data> """.strip()
py
1a3ac46f96ee228d2b890d7c4767b1626406753b
'''
Cuckoo filter internal bucket.
'''

import random


class Bucket():
    '''
    Bucket class for storing fingerprints.
    '''

    # https://docs.python.org/3/reference/datamodel.html#object.__slots__
    __slots__ = ('size', 'bucket')

    def __init__(self, size=4):
        '''
        Initialize a dynamic or static bucket to keep a set of Cuckoo
        fingerprints.

        size: The maximum number of fingerprints the bucket can store.
              Default size is 4, which closely approaches the best size for
              FPP between 0.00001 and 0.002 (see Fan et al.).  If your
              targeted FPP is greater than 0.002, a bucket size of 2 is more
              space efficient.
        '''
        self.size = size
        # The bucket is implemented as an array because it's possible to have
        # multiple items with the same fingerprints
        self.bucket = []

    # TODO: investigate a better way to compress the bucket's fingerprints.
    # It will be very helpful when long fingerprints are needed for low error
    # rate.
    def insert(self, fingerprint):
        '''
        Insert a fingerprint into the bucket; the fingerprint basically is
        just a bit vector.  The longer the bit vector, the lower the collision
        rate.  The insertion of duplicate entries is allowed.
        '''
        if not self.is_full():
            self.bucket.append(fingerprint)
            # When the bucket is not full, just append the fingerprint there
            return True

        # In static mode, the size of the bucket is fixed. It means that the
        # filter is reaching its capacity here.
        return False

    def contains(self, fingerprint):
        '''
        Check if this bucket contains the provided fingerprint.
        '''
        return fingerprint in self.bucket

    def find_and_replace(self, look_for, replace_with):
        '''
        Find an exact fingerprint in the specified bucket and replace it with
        another fingerprint.  Return False if there is no such fingerprint.
        '''
        try:
            self.bucket[self.bucket.index(look_for)] = replace_with
            return True
        except ValueError:
            # No such fingerprint in the bucket
            return False

    def delete(self, fingerprint):
        '''
        Delete a fingerprint from the bucket.  Returns True if the fingerprint
        was present in the bucket.  This is useful for keeping track of how
        many items are present in the filter.
        '''
        try:
            del self.bucket[self.bucket.index(fingerprint)]
            return True
        except ValueError:
            # No such fingerprint in the bucket
            return False

    def swap(self, fingerprint):
        '''
        Swap a fingerprint with a randomly chosen fingerprint from the bucket.
        The given fingerprint is stored in the bucket.  The swapped
        fingerprint is returned.
        '''
        # There is a tricky bug in the swap function when an item is added
        # several times.  In such case, there is a chance that a fingerprint
        # is swapped with itself, thus trying to move fingerprints around
        # won't work.
        #
        # Assuming that the bucket size is 4, the maximum number of times an
        # item can be added is 4 * 2 = 8.
        #
        # TODO: Investigate if there is a better solution for this, because
        # this is a form of local limit of the Cuckoo filter.
        rindex = random.choice([i for i in range(len(self.bucket))
                                if fingerprint != self.bucket[i]])

        # Swap the two fingerprints
        fingerprint, self.bucket[rindex] = self.bucket[rindex], fingerprint

        # and return the one from the bucket
        return fingerprint

    def is_full(self):
        '''
        Signify that the bucket is full; a fingerprint will need to be swapped
        out.
        '''
        return len(self.bucket) >= self.size

    def __contains__(self, fingerprint):
        return self.contains(fingerprint)

    def __repr__(self):
        return '<Bucket: {0}>'.format(self.bucket)

    def __sizeof__(self):
        return super().__sizeof__() + sum(f.__sizeof__() for f in self.bucket)
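A minimal usage sketch for the Bucket class above, assuming integer fingerprints (the class itself is agnostic to the fingerprint type); the values 1, 2 and 3 are arbitrary stand-ins chosen for illustration only:

# Sketch only: exercises insert/contains/swap/delete on a deliberately tiny bucket.
bucket = Bucket(size=2)
assert bucket.insert(1)
assert bucket.insert(2)
assert not bucket.insert(3)   # full: a cuckoo filter would now relocate via swap()
assert 1 in bucket            # __contains__ delegates to contains()
evicted = bucket.swap(3)      # 3 moves in, a randomly chosen resident moves out
assert evicted in (1, 2)
assert bucket.delete(3)       # True: 3 is now present and can be removed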
py
1a3ac4dc2691452a0d2d150c50270704e8969338
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiCateringPosOrderUploadModel import KoubeiCateringPosOrderUploadModel


class KoubeiCateringPosOrderUploadRequest(object):

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        if isinstance(value, KoubeiCateringPosOrderUploadModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiCateringPosOrderUploadModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        params = dict()
        params[P_METHOD] = 'koubei.catering.pos.order.upload'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        multipart_params = dict()
        return multipart_params
py
1a3ac544206082f49c8a93925b2310ffccb0e7c0
import http.server


class MyHandler(http.server.SimpleHTTPRequestHandler):

    def do_GET(self):
        # Request paths arrive with a leading slash, e.g. "GET /good HTTP/1.1",
        # so the comparison must include it for this branch to ever match.
        if self.path == "/good":
            self.send_response(200)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write(b"A good request")
            return
        self.send_response(400)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
        self.wfile.write(b"A bad request")


http.server.HTTPServer(('', 8000), MyHandler).serve_forever()
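A quick client-side check for the handler above, assuming the server is already running locally on port 8000; it simply exercises the two branches of do_GET from a separate process:

# Sketch only: run in another shell while the server above is serving.
import urllib.request
from urllib.error import HTTPError

with urllib.request.urlopen("http://localhost:8000/good") as resp:
    print(resp.status, resp.read())    # 200 b'A good request'

try:
    urllib.request.urlopen("http://localhost:8000/anything-else")
except HTTPError as err:
    print(err.code, err.read())        # 400 b'A bad request'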
py
1a3ac5528444b039a4f988e0b436d0f405fbe760
# Generated by Django 3.0.7 on 2020-06-19 20:22

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('streams', '0004_auto_20200619_1321'),
    ]

    operations = [
        migrations.AlterField(
            model_name='game',
            name='num_players',
            field=models.IntegerField(),
        ),
    ]
py
1a3ac5da7330afd4a1ca8e69bf166d7961468b56
# Generated by Django 2.2 on 2019-05-02 02:01

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Aluno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('NomeCompA', models.CharField(max_length=50, verbose_name='Nome do Aluno')),
                ('emailPersoA', models.CharField(max_length=50, verbose_name='E-mail')),
                ('senhaA', models.CharField(max_length=12, verbose_name='Senha')),
            ],
        ),
        migrations.CreateModel(
            name='Disciplina',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('NomeDisc', models.CharField(max_length=50, verbose_name='Disciplina')),
                ('CargaH', models.IntegerField(max_length=50, verbose_name='Carga Horária')),
                ('emailPersoP', models.CharField(max_length=50, verbose_name='E-mail')),
                ('NomeCurs', models.CharField(max_length=50, verbose_name='Nome do Curso')),
            ],
        ),
        migrations.CreateModel(
            name='Professor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('NomeCompP', models.CharField(max_length=50, verbose_name='Nome do professor')),
                ('emailPersoP', models.CharField(max_length=50, verbose_name='E-mail')),
                ('senhaP', models.CharField(max_length=12, verbose_name='Senha')),
                ('DisciLec', models.CharField(max_length=50, verbose_name='Disciplinas Lecionadas')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('login', models.CharField(max_length=15, verbose_name='Login')),
                ('senha', models.CharField(max_length=12, verbose_name='Senha')),
                ('email', models.CharField(max_length=50, verbose_name='E-mail')),
            ],
        ),
    ]
py
1a3ac6d6d06f1e69bfb97df91e7f5dd3d4a4589a
import argparse import logging from dvc.command.base import CmdBase, append_doc_link from dvc.exceptions import DvcException logger = logging.getLogger(__name__) class CmdRun(CmdBase): def run(self): if not any( [ self.args.deps, self.args.outs, self.args.outs_no_cache, self.args.metrics, self.args.metrics_no_cache, self.args.plots, self.args.plots_no_cache, self.args.outs_persist, self.args.outs_persist_no_cache, self.args.params, self.args.command, ] ): # pragma: no cover logger.error( "too few arguments. Specify at least one: `-d`, `-o`, `-O`, " "`-m`, `-M`, `-p`, `--plots`, `--plots-no-cache`, " "`--outs-persist`, `--outs-persist-no-cache`, `command`." ) return 1 try: self.repo.run( cmd=self._parsed_cmd(), outs=self.args.outs, outs_no_cache=self.args.outs_no_cache, metrics=self.args.metrics, metrics_no_cache=self.args.metrics_no_cache, plots=self.args.plots, plots_no_cache=self.args.plots_no_cache, deps=self.args.deps, params=self.args.params, fname=self.args.file, wdir=self.args.wdir, no_exec=self.args.no_exec, force=self.args.force, run_cache=not self.args.no_run_cache, no_commit=self.args.no_commit, outs_persist=self.args.outs_persist, outs_persist_no_cache=self.args.outs_persist_no_cache, always_changed=self.args.always_changed, name=self.args.name, single_stage=self.args.single_stage, external=self.args.external, ) except DvcException: logger.exception("") return 1 return 0 def _parsed_cmd(self): """ We need to take into account two cases: - ['python code.py foo bar']: Used mainly with dvc as a library - ['echo', 'foo bar']: List of arguments received from the CLI The second case would need quoting, as it was passed through: dvc run echo "foo bar" """ if len(self.args.command) < 2: return " ".join(self.args.command) return " ".join(self._quote_argument(arg) for arg in self.args.command) def _quote_argument(self, argument): if " " not in argument or '"' in argument: return argument return f'"{argument}"' def add_parser(subparsers, parent_parser): RUN_HELP = "Generate a stage file from a command and execute the command." 
run_parser = subparsers.add_parser( "run", parents=[parent_parser], description=append_doc_link(RUN_HELP, "run"), help=RUN_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) run_parser.add_argument( "-d", "--deps", action="append", default=[], help="Declare dependencies for reproducible cmd.", metavar="<path>", ) run_parser.add_argument( "-n", "--name", help="Stage name.", ) run_parser.add_argument( "-o", "--outs", action="append", default=[], help="Declare output file or directory.", metavar="<filename>", ) run_parser.add_argument( "-O", "--outs-no-cache", action="append", default=[], help="Declare output file or directory " "(do not put into DVC cache).", metavar="<filename>", ) run_parser.add_argument( "-p", "--params", action="append", default=[], help="Declare parameter to use as additional dependency.", metavar="[<filename>:]<params_list>", ) run_parser.add_argument( "-m", "--metrics", action="append", default=[], help="Declare output metric file.", metavar="<path>", ) run_parser.add_argument( "-M", "--metrics-no-cache", action="append", default=[], help="Declare output metric file (do not put into DVC cache).", metavar="<path>", ) run_parser.add_argument( "--plots", action="append", default=[], help="Declare output plot file.", metavar="<path>", ) run_parser.add_argument( "--plots-no-cache", action="append", default=[], help="Declare output plot file (do not put into DVC cache).", metavar="<path>", ) run_parser.add_argument( "--file", help="Specify name of the DVC-file this command will generate.", metavar="<filename>", ) run_parser.add_argument( "-w", "--wdir", help="Directory within your repo to run your command in.", metavar="<path>", ) run_parser.add_argument( "--no-exec", action="store_true", default=False, help="Only create stage file without actually running it.", ) run_parser.add_argument( "-f", "--force", action="store_true", default=False, help="Overwrite existing stage", ) run_parser.add_argument( "--no-run-cache", action="store_true", default=False, help=( "Execute the command even if this stage has already been run " "with the same command/dependencies/outputs/etc before." ), ) run_parser.add_argument( "--no-commit", action="store_true", default=False, help="Don't put files/directories into cache.", ) run_parser.add_argument( "--outs-persist", action="append", default=[], help="Declare output file or directory that will not be " "removed upon repro.", metavar="<filename>", ) run_parser.add_argument( "--outs-persist-no-cache", action="append", default=[], help="Declare output file or directory that will not be " "removed upon repro (do not put into DVC cache).", metavar="<filename>", ) run_parser.add_argument( "--always-changed", action="store_true", default=False, help="Always consider this DVC-file as changed.", ) run_parser.add_argument( "--single-stage", action="store_true", default=False, help=argparse.SUPPRESS, ) run_parser.add_argument( "--external", action="store_true", default=False, help="Allow outputs that are outside of the DVC repository.", ) run_parser.add_argument( "command", nargs=argparse.REMAINDER, help="Command to execute." ) run_parser.set_defaults(func=CmdRun)
py
1a3ac7fc465bfcad7b59f07226989c220a81ac3f
from functools import lru_cache as memoized import os from os import path import sys import yaml import geopandas as gpd from mapshader.colors import colors from mapshader.io import load_raster from mapshader.io import load_vector from mapshader.transforms import get_transform_by_name import spatialpandas class MapSource(object): def __init__(self, name=None, description=None, filepath=None, legend=None, config_path=None, data=None, geometry_type=None, key=None, text=None, fields=None, span=None, route=None, geometry_field='geometry', xfield='geometry', yfield='geometry', zfield=None, agg_func=None, raster_interpolate='linear', shade_how='linear', cmap=colors['viridis'], color_key=None, dynspread=None, extras=None, raster_padding=0, service_types=None, full_extent=None, default_extent=None, default_height=256, default_width=256, overviews=None, transforms=None, attrs=None, preload=False): if fields is None and isinstance(data, (gpd.GeoDataFrame)): fields = [geometry_field] if zfield: fields.append(zfield) if extras is None: extras = [] if transforms is None: transforms = [] if overviews is None: overviews = {} if service_types is None: service_types = ('tile', 'image', 'wms', 'geojson') if span == 'min/max' and zfield is None and geometry_type != 'raster': raise ValueError('You must include a zfield for min/max scan calculation') if legend is not None and geometry_type == 'raster': if legend[0].get('value') is not None: cmap = {} for leg in legend: cor = leg['color'] val = leg['value'] if isinstance(val, (list, tuple)): val = tuple(val) cmap[val] = cor val = 20037508.3427892 if default_extent is None: default_extent = [-val, -val, val, val] self.name = name self.description = description self.filepath = filepath self.config_path = config_path self.geometry_type = geometry_type self.key = key self.text = text self.legend = legend self.fields = fields self.span = span self.route = route self.xfield = xfield self.raster_padding = 0 self.yfield = yfield self.zfield = zfield self.agg_func = agg_func self.overviews = overviews self.raster_agg_func = raster_interpolate self.shade_how = shade_how self.cmap = cmap self.color_key = color_key self.dynspread = dynspread self.extras = extras self.service_types = service_types self.transforms = transforms self.default_extent = default_extent self.default_width = default_width self.default_height = default_height self.preload = preload self.geometry_field = geometry_field self.is_loaded = False self.data = data # autoload if overviews are present contains_overviews = bool(len([t for t in transforms if 'overviews' in t['name']])) if self.preload or contains_overviews: self.load() @property def load_func(self): raise NotImplementedError() def get_full_extent(self): raise NotImplementedError() def load(self): if self.is_loaded: return self if self.data is None: if self.config_path: ogcwd = os.getcwd() config_dir = path.abspath(path.dirname(self.config_path)) os.chdir(config_dir) try: data_path = path.abspath(path.expanduser(self.filepath)) finally: os.chdir(ogcwd) elif self.filepath.startswith('zip'): print('Zipfile Path', file=sys.stdout) data_path = self.filepath elif not path.isabs(self.filepath): print('Not Absolute', file=sys.stdout) data_path = path.abspath(path.expanduser(self.filepath)) else: print('Using Given Filepath unmodified: config{self.config_file}', file=sys.stdout) data_path = self.filepath data = self.load_func(data_path) else: data = self.data if self.fields: data = data[self.fields] self.data = data self._finish_load() return self def 
_finish_load(self): if self.is_loaded: return self self._apply_transforms() self.is_loaded = True def _apply_transforms(self): print('# ----------------------', file=sys.stdout) print(f'# APPLYING TRANSFORMS {self.name}', file=sys.stdout) print('# ----------------------', file=sys.stdout) for trans in self.transforms: transform_name = trans['name'] print(f'\tApplying {transform_name}', file=sys.stdout) func = get_transform_by_name(transform_name) args = trans.get('args', {}) if 'overviews' in transform_name: self.overviews = func(self.data, **args) else: self.data = func(self.data, **args) # apply transforms to overviews if they exist for level, overview_data in self.overviews.items(): self.overviews[level] = func(overview_data, **args) return self @staticmethod def from_obj(obj: dict): transforms = obj.get('transforms') if transforms and isinstance(transforms, (list, tuple)): n = 'raster_to_categorical_points' has_to_vector = len([t for t in transforms if t['name'] == n]) else: has_to_vector = False if obj['geometry_type'] == 'raster' or has_to_vector: return RasterSource(**obj) else: return VectorSource(**obj) class RasterSource(MapSource): @property def load_func(self): return load_raster @property @memoized() def full_extent(self): return (self.data.coords['x'].min().compute().item(), self.data.coords['y'].min().compute().item(), self.data.coords['x'].max().compute().item(), self.data.coords['y'].max().compute().item()) class VectorSource(MapSource): @property def load_func(self): return load_vector @property @memoized() def full_extent(self): if isinstance(self.data, spatialpandas.GeoDataFrame): return self.data.to_geopandas()[self.geometry_field].total_bounds else: return self.data[self.geometry_field].total_bounds class MapService(): def __init__(self, source: MapSource, renderers=[]): self.source = source self.renderers = renderers @property def key(self): return f'{self.source.key}-{self.service_type}' @property def name(self): return f'{self.source.name} {self.service_type}' @property def legend_name(self): return f'{self.name}-legend' @property def default_extent(self): return self.source.default_extent @property def default_width(self): return self.source.default_width @property def default_height(self): return self.source.default_height @property def service_page_url(self): return f'/{self.key}' @property def legend_url(self): return f'/{self.key}/legend' @property def service_page_name(self): return f'/{self.key}-{self.service_type}' @property def service_url(self): raise NotImplementedError() @property def client_url(self): raise NotImplementedError() @property def default_url(self): raise NotImplementedError() @property def service_type(self): raise NotImplementedError() class TileService(MapService): @property def service_url(self): return f'/{self.key}' + '/tile/<z>/<x>/<y>' @property def client_url(self): return f'/{self.key}' + '/tile/{z}/{x}/{y}' @property def default_url(self): return f'/{self.key}' + '/tile/0/0/0' @property def service_type(self): return 'tile' class ImageService(MapService): @property def service_url(self): url = (f'/{self.key}' '/image' '/<xmin>/<ymin>/<xmax>/<ymax>' '/<width>/<height>') return url @property def client_url(self): return f'/{self.key}' + '/image/{XMIN}/{YMIN}/{XMAX}/{YMAX}/{width}/{height}' @property def default_url(self): xmin = self.default_extent[0] ymin = self.default_extent[1] xmax = self.default_extent[2] ymax = self.default_extent[3] width = self.default_width height = self.default_height return 
f'/{self.key}/image/{xmin}/{ymin}/{xmax}/{ymax}/{width}/{height}' @property def service_type(self): return 'image' class WMSService(MapService): @property def service_url(self): url = f'/{self.key}/wms' return url @property def client_url(self, width=256, height=256): url = f'/{self.key}' url += '?bbox={XMIN},{YMIN},{XMAX},{YMAX}' url += f'&width={width}&height={height}' return url @property def default_url(self): xmin = self.default_extent[0] ymin = self.default_extent[1] xmax = self.default_extent[2] ymax = self.default_extent[3] width = self.default_width height = self.default_height return f'/{self.key}?bbox={xmin},{ymin},{xmax},{ymax}&width={width}&height={height}' @property def service_type(self): return 'wms' class GeoJSONService(MapService): @property def service_url(self): url = f'/{self.key}/geojson' return url @property def client_url(self): url = f'/{self.key}/geojson' return url @property def default_url(self): return f'/{self.key}/geojson' @property def service_type(self): return 'geojson' # ---------------------------------------------------------------------------- # DEFAULT MAP SOURCES # ---------------------------------------------------------------------------- def world_countries_source(): # construct transforms select_by_attrs_transform = dict(name='select_by_attributes', args=dict(field='name', value=['Antarctica', 'Fr. S. Antarctic Lands'], operator='NOT IN')) reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857)) sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry')) overviews_transform = dict(name='build_vector_overviews', args=dict(levels={'0': 10000, '1': 2500, '2': 1250, '3': 650, '4': 300, '5': 150, '6': 75, '7': 32, '8': 20, '9': 10, '10': 5}, geometry_field='geometry')) transforms = [select_by_attrs_transform, reproject_transform, overviews_transform, sp_transform] # construct value obj source_obj = dict() source_obj['name'] = 'World Countries' source_obj['key'] = 'world-countries' source_obj['text'] = 'World Countries' source_obj['description'] = 'World Country Polygons' source_obj['geometry_type'] = 'polygon' source_obj['agg_func'] = 'max' source_obj['shade_how'] = 'linear' source_obj['span'] = 'min/max' source_obj['raster_interpolate'] = 'linear' source_obj['xfield'] = 'x' source_obj['yfield'] = 'y' source_obj['zfield'] = 'pop_est' source_obj['filepath'] = gpd.datasets.get_path('naturalearth_lowres') source_obj['transforms'] = transforms source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson'] return source_obj def world_boundaries_source(): # construct transforms select_by_attrs_transform = dict(name='select_by_attributes', args=dict(field='name', value=['Antarctica', 'Fr. S. 
Antarctic Lands'], operator='NOT IN')) reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857)) polygon_to_line_transform = dict(name='polygon_to_line', args=dict(geometry_field='geometry')) sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry')) transforms = [select_by_attrs_transform, polygon_to_line_transform, reproject_transform, sp_transform] # construct value obj source_obj = dict() source_obj['name'] = 'World Boundaries' source_obj['key'] = 'world-boundaries' source_obj['text'] = 'World Boundaries' source_obj['description'] = 'World Country Boundaries' source_obj['geometry_type'] = 'line' source_obj['agg_func'] = 'max' source_obj['shade_how'] = 'linear' source_obj['cmap'] = ['aqua', 'aqua'] source_obj['raster_interpolate'] = 'linear' source_obj['xfield'] = 'x' source_obj['yfield'] = 'y' source_obj['filepath'] = gpd.datasets.get_path('naturalearth_lowres') source_obj['transforms'] = transforms source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson'] return source_obj def world_cities_source(): # construct transforms reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857)) add_xy_fields_transform = dict(name='add_xy_fields', args=dict(geometry_field='geometry')) sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry')) transforms = [reproject_transform, add_xy_fields_transform, sp_transform] # construct value obj source_obj = dict() source_obj['name'] = 'World Cities' source_obj['key'] = 'world-cities' source_obj['text'] = 'World Cities' source_obj['description'] = 'World Cities Point Locations' source_obj['geometry_type'] = 'point' source_obj['agg_func'] = 'max' source_obj['cmap'] = ['aqua', 'aqua'] source_obj['shade_how'] = 'linear' source_obj['dynspread'] = 2 source_obj['raster_interpolate'] = 'linear' source_obj['xfield'] = 'X' source_obj['yfield'] = 'Y' source_obj['filepath'] = gpd.datasets.get_path('naturalearth_cities') source_obj['transforms'] = transforms source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson'] return source_obj def nybb_source(): # construct transforms reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857)) sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry')) transforms = [reproject_transform, sp_transform] # construct value obj source_obj = dict() source_obj['name'] = 'NYC Admin' source_obj['key'] = 'nyc-boroughs' source_obj['text'] = 'NYC Boroughs' source_obj['description'] = 'New York City Boroughs' source_obj['geometry_type'] = 'polygon' source_obj['agg_func'] = 'max' source_obj['shade_how'] = 'linear' source_obj['span'] = 'min/max' source_obj['dynspread'] = None source_obj['raster_interpolate'] = 'linear' source_obj['xfield'] = 'geometry' source_obj['yfield'] = 'geometry' source_obj['zfield'] = 'BoroCode' source_obj['filepath'] = gpd.datasets.get_path('nybb') source_obj['transforms'] = transforms source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson'] return source_obj def elevation_source(): # find data path HERE = path.abspath(path.dirname(__file__)) FIXTURES_DIR = path.join(HERE, 'tests', 'fixtures') elevation_path = path.join(FIXTURES_DIR, 'elevation.tif') # construct transforms squeeze_transform = dict(name='squeeze', args=dict(dim='band')) cast_transform = dict(name='cast', args=dict(dtype='float64')) orient_transform = dict(name='orient_array') flip_transform = dict(name='flip_coords', args=dict(dim='y')) reproject_transform = dict(name='reproject_raster', args=dict(epsg=3857)) 
transforms = [squeeze_transform, cast_transform, orient_transform, flip_transform, reproject_transform] # construct value obj source_obj = dict() source_obj['name'] = 'Elevation' source_obj['key'] = 'elevation' source_obj['text'] = 'Elevation' source_obj['description'] = 'Global Elevation Dataset' source_obj['geometry_type'] = 'raster' source_obj['shade_how'] = 'linear' source_obj['cmap'] = ['white', 'black'] source_obj['span'] = (58, 248) source_obj['raster_padding'] = 0 source_obj['raster_interpolate'] = 'linear' source_obj['xfield'] = 'geometry' source_obj['yfield'] = 'geometry' source_obj['filepath'] = elevation_path source_obj['transforms'] = transforms source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson'] return source_obj def elevation_source_netcdf(): # find data path HERE = path.abspath(path.dirname(__file__)) FIXTURES_DIR = path.join(HERE, 'tests', 'fixtures') elevation_path = path.join(FIXTURES_DIR, 'elevation.nc') # construct transforms transforms = [] # construct value obj source_obj = dict() source_obj['name'] = 'Elevation NetCDF' source_obj['key'] = 'elevation-netcdf' source_obj['text'] = 'Elevation NetCDF' source_obj['description'] = 'Global Elevation Dataset (NetCDF)' source_obj['geometry_type'] = 'raster' source_obj['shade_how'] = 'linear' source_obj['cmap'] = ['white', 'black'] source_obj['span'] = (58, 248) source_obj['raster_padding'] = 0 source_obj['raster_interpolate'] = 'linear' source_obj['xfield'] = 'geometry' source_obj['yfield'] = 'geometry' source_obj['filepath'] = elevation_path source_obj['transforms'] = transforms source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson'] return source_obj def parse_sources(source_objs, config_path=None, contains=None): service_classes = { 'tile': TileService, 'wms': WMSService, 'image': ImageService, 'geojson': GeoJSONService, } for source in source_objs: for service_type in source['service_types']: source['config_path'] = config_path if contains and contains not in source.get('key'): continue # create sources source_obj = MapSource.from_obj(source) # create services ServiceKlass = service_classes[service_type] # TODO: add renderers here... yield ServiceKlass(source=source_obj) def get_services(config_path=None, include_default=True, contains=None, sources=None): source_objs = None if sources is not None: source_objs = sources elif config_path is None: print('No Config Found...using default services...', file=sys.stdout) source_objs = [world_countries_source(), world_boundaries_source(), world_cities_source(), nybb_source(), elevation_source(), elevation_source_netcdf()] else: with open(config_path, 'r') as f: content = f.read() config_obj = yaml.load(content) source_objs = config_obj['sources'] if include_default: source_objs += [world_countries_source(), world_boundaries_source(), world_cities_source(), nybb_source(), elevation_source()] for service in parse_sources(source_objs, config_path=config_path, contains=contains): yield service
py
1a3ac90b831ef2c1d4e4842736c1c90c5143833e
from .meta import SnorkelBase, snorkel_postgres from sqlalchemy import Column, String, Integer, Text, ForeignKey, UniqueConstraint from sqlalchemy.dialects import postgresql from sqlalchemy.orm import relationship, backref from sqlalchemy.types import PickleType from sqlalchemy.sql import select, text class Context(SnorkelBase): """ A piece of content from which Candidates are composed. """ __tablename__ = 'context' id = Column(Integer, primary_key=True) type = Column(String, nullable=False) stable_id = Column(String, unique=True, nullable=False) __mapper_args__ = { 'polymorphic_identity': 'context', 'polymorphic_on': type } def get_parent(self): raise NotImplementedError() def get_children(self): raise NotImplementedError() def get_sentence_generator(self): raise NotImplementedError() class Document(Context): """ A root Context. """ __tablename__ = 'document' id = Column(Integer, ForeignKey('context.id', ondelete='CASCADE'), primary_key=True) name = Column(String, unique=True, nullable=False) meta = Column(PickleType) __mapper_args__ = { 'polymorphic_identity': 'document', } def get_parent(self): return None def get_children(self): return self.sentences def get_sentence_generator(self): for sentence in self.sentences: yield sentence def __repr__(self): return "Document " + str(self.name) class Sentence(Context): """A sentence Context in a Document.""" __tablename__ = 'sentence' id = Column(Integer, ForeignKey('context.id', ondelete='CASCADE'), primary_key=True) document_id = Column(Integer, ForeignKey('document.id', ondelete='CASCADE')) position = Column(Integer, nullable=False) document = relationship('Document', backref=backref('sentences', order_by=position, cascade='all, delete-orphan'), foreign_keys=document_id) text = Column(Text, nullable=False) if snorkel_postgres: words = Column(postgresql.ARRAY(String), nullable=False) char_offsets = Column(postgresql.ARRAY(Integer), nullable=False) lemmas = Column(postgresql.ARRAY(String)) pos_tags = Column(postgresql.ARRAY(String)) ner_tags = Column(postgresql.ARRAY(String)) dep_parents = Column(postgresql.ARRAY(Integer)) dep_labels = Column(postgresql.ARRAY(String)) entity_cids = Column(postgresql.ARRAY(String)) entity_types = Column(postgresql.ARRAY(String)) else: words = Column(PickleType, nullable=False) char_offsets = Column(PickleType, nullable=False) lemmas = Column(PickleType) pos_tags = Column(PickleType) ner_tags = Column(PickleType) dep_parents = Column(PickleType) dep_labels = Column(PickleType) entity_cids = Column(PickleType) entity_types = Column(PickleType) __mapper_args__ = { 'polymorphic_identity': 'sentence', } __table_args__ = ( UniqueConstraint(document_id, position), ) def get_parent(self): return self.document def get_children(self): return self.spans def _asdict(self): return { 'id': self.id, 'document': self.document, 'position': self.position, 'text': self.text, 'words': self.words, 'char_offsets': self.char_offsets, 'lemmas': self.lemmas, 'pos_tags': self.pos_tags, 'ner_tags': self.ner_tags, 'dep_parents': self.dep_parents, 'dep_labels': self.dep_labels, 'entity_cids': self.entity_cids, 'entity_types': self.entity_types } def get_sentence_generator(self): yield self def __repr__(self): return "Sentence" + str((self.document, self.position, self.text)) class TemporaryContext(object): """ A context which does not incur the overhead of a proper ORM-based Context object. 
The TemporaryContext class is specifically for the candidate extraction process, during which a CandidateSpace object will generate many TemporaryContexts, which will then be filtered by Matchers prior to materialization of Candidates and constituent Context objects. Every Context object has a corresponding TemporaryContext object from which it inherits. A TemporaryContext must have specified equality / set membership semantics, a stable_id for checking uniqueness against the database, and a promote() method which returns a corresponding Context object. """ def __init__(self): self.id = None def load_id_or_insert(self, session): if self.id is None: stable_id = self.get_stable_id() id = session.execute(select([Context.id]).where(Context.stable_id == stable_id)).first() if id is None: self.id = session.execute( Context.__table__.insert(), {'type': self._get_table_name(), 'stable_id': stable_id}).inserted_primary_key[0] insert_args = self._get_insert_args() insert_args['id'] = self.id session.execute(text(self._get_insert_query()), insert_args) else: self.id = id[0] def __eq__(self, other): raise NotImplementedError() def __ne__(self, other): raise NotImplementedError() def __hash__(self): raise NotImplementedError() def _get_polymorphic_identity(self): raise NotImplementedError() def get_stable_id(self): raise NotImplementedError() def _get_table_name(self): raise NotImplementedError() def _get_insert_query(self): raise NotImplementedError() def _get_insert_args(self): raise NotImplementedError() class TemporarySpan(TemporaryContext): """The TemporaryContext version of Span""" def __init__(self, sentence, char_start, char_end, meta=None): super(TemporarySpan, self).__init__() self.sentence = sentence # The sentence Context of the Span self.char_end = char_end self.char_start = char_start self.meta = meta def __len__(self): return self.char_end - self.char_start + 1 def __eq__(self, other): try: return self.sentence == other.sentence and self.char_start == other.char_start \ and self.char_end == other.char_end except AttributeError: return False def __ne__(self, other): try: return self.sentence != other.sentence or self.char_start != other.char_start \ or self.char_end != other.char_end except AttributeError: return True def __hash__(self): return hash(self.sentence) + hash(self.char_start) + hash(self.char_end) def get_stable_id(self): return construct_stable_id(self.sentence, self._get_polymorphic_identity(), self.char_start, self.char_end) def _get_table_name(self): return 'span' def _get_polymorphic_identity(self): return 'span' def _get_insert_query(self): return """INSERT INTO span VALUES(:id, :sentence_id, :char_start, :char_end, :meta)""" def _get_insert_args(self): return {'sentence_id' : self.sentence.id, 'char_start': self.char_start, 'char_end' : self.char_end, 'meta' : self.meta} def get_word_start(self): return self.char_to_word_index(self.char_start) def get_word_end(self): return self.char_to_word_index(self.char_end) def get_n(self): return self.get_word_end() - self.get_word_start() + 1 def char_to_word_index(self, ci): """Given a character-level index (offset), return the index of the **word this char is in**""" i = None for i, co in enumerate(self.sentence.char_offsets): if ci == co: return i elif ci < co: return i-1 return i def word_to_char_index(self, wi): """Given a word-level index, return the character-level index (offset) of the word's start""" return self.sentence.char_offsets[wi] def get_attrib_tokens(self, a='words'): """Get the tokens of sentence attribute _a_ 
over the range defined by word_offset, n""" return self.sentence.__getattribute__(a)[self.get_word_start():self.get_word_end() + 1] def get_attrib_span(self, a, sep=" "): """Get the span of sentence attribute _a_ over the range defined by word_offset, n""" # NOTE: Special behavior for words currently (due to correspondence with char_offsets) if a == 'words': return self.sentence.text[self.char_start:self.char_end + 1] else: return sep.join(self.get_attrib_tokens(a)) def get_span(self, sep=" "): return self.get_attrib_span('words', sep) def __contains__(self, other_span): return other_span.char_start >= self.char_start and other_span.char_end <= self.char_end def __getitem__(self, key): """ Slice operation returns a new candidate sliced according to **char index** Note that the slicing is w.r.t. the candidate range (not the abs. sentence char indexing) """ if isinstance(key, slice): char_start = self.char_start if key.start is None else self.char_start + key.start if key.stop is None: char_end = self.char_end elif key.stop >= 0: char_end = self.char_start + key.stop - 1 else: char_end = self.char_end + key.stop return self._get_instance(char_start=char_start, char_end=char_end, sentence=self.sentence) else: raise NotImplementedError() def __repr__(self): return u'%s("%s", sentence=%s, chars=[%s,%s], words=[%s,%s])' \ % (self.__class__.__name__, self.get_span(), self.sentence.id, self.char_start, self.char_end, self.get_word_start(), self.get_word_end()) def _get_instance(self, **kwargs): return TemporarySpan(**kwargs) class Span(Context, TemporarySpan): """ A span of characters, identified by Context id and character-index start, end (inclusive). char_offsets are **relative to the Context start** """ __tablename__ = 'span' id = Column(Integer, ForeignKey('context.id', ondelete='CASCADE'), primary_key=True) sentence_id = Column(Integer, ForeignKey('sentence.id', ondelete='CASCADE')) char_start = Column(Integer, nullable=False) char_end = Column(Integer, nullable=False) meta = Column(PickleType) __table_args__ = ( UniqueConstraint(sentence_id, char_start, char_end), ) __mapper_args__ = { 'polymorphic_identity': 'span', 'inherit_condition': (id == Context.id) } sentence = relationship('Sentence', backref=backref('spans', cascade='all, delete-orphan'), order_by=char_start, foreign_keys=sentence_id) def get_parent(self): return self.sentence def get_children(self): return None def _get_instance(self, **kwargs): return Span(**kwargs) # We redefine these to use default semantics, overriding the operators inherited from TemporarySpan def __eq__(self, other): return self is other def __ne__(self, other): return self is not other def __hash__(self): return id(self) def split_stable_id(stable_id): """ Split stable id, returning: * Document (root) stable ID * Context polymorphic type * Character offset start, end *relative to document start* Returns tuple of four values. 
""" split1 = stable_id.split('::') if len(split1) == 2: split2 = split1[1].split(':') if len(split2) == 3: return split1[0], split2[0], int(split2[1]), int(split2[2]) raise ValueError("Malformed stable_id:", stable_id) def construct_stable_id(parent_context, polymorphic_type, relative_char_offset_start, relative_char_offset_end): """Contruct a stable ID for a Context given its parent and its character offsets relative to the parent""" doc_id, _, parent_doc_char_start, _ = split_stable_id(parent_context.stable_id) start = parent_doc_char_start + relative_char_offset_start end = parent_doc_char_start + relative_char_offset_end return "%s::%s:%s:%s" % (doc_id, polymorphic_type, start, end)
py
1a3acaa95869b77e55fb9f79eae5a39622b780aa
from dataclasses import dataclass
from typing import Optional, Type, Union
from unittest.mock import DEFAULT, _Sentinel


@dataclass
class TestConfig:
    url: Optional[str] = None
    installer: Optional[str] = None
    extras: Union[str, None, Type[_Sentinel]] = DEFAULT
    version: Optional[str] = None
    verbose: bool = False
    rev: Optional[str] = None
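A short sketch of why extras defaults to unittest.mock's DEFAULT sentinel instead of None; the interpretation (telling "not specified" apart from an explicit None) and the consume_extras helper are assumptions made for illustration, not part of the original module:

from unittest.mock import DEFAULT

def consume_extras(cfg: "TestConfig") -> str:
    # Hypothetical consumer: three distinct caller intents become distinguishable.
    if cfg.extras is DEFAULT:
        return "extras not specified by the caller"
    if cfg.extras is None:
        return "caller explicitly asked for no extras"
    return f"caller asked for extras: {cfg.extras}"

print(consume_extras(TestConfig()))                   # not specified
print(consume_extras(TestConfig(extras=None)))        # explicitly none
print(consume_extras(TestConfig(extras="dev,test")))  # a concrete extras string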
py
1a3acaeeefa85548aea41625a841ceb17508c45b
# Copyright 2002 by Andrew Dalke. All rights reserved. # Revisions 2007-2016 copyright by Peter Cock. All rights reserved. # Revisions 2008-2009 copyright by Cymon J. Cox. All rights reserved. # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. # # Note that BioSQL (including the database schema and scripts) is # available and licensed separately. Please consult www.biosql.org """Implementations of Biopython-like Seq objects on top of BioSQL. This allows retrival of items stored in a BioSQL database using a biopython-like SeqRecord and Seq interface. Note: Currently we do not support recording per-letter-annotations (like quality scores) in BioSQL. """ from Bio.Seq import Seq, UnknownSeq from Bio.SeqRecord import SeqRecord, _RestrictedDict from Bio import SeqFeature class DBSeq(Seq): """BioSQL equivalent of the Biopython Seq object.""" def __init__(self, primary_id, adaptor, alphabet=None, start=0, length=0): """Create a new DBSeq object referring to a BioSQL entry. You wouldn't normally create a DBSeq object yourself, this is done for you when retrieving a DBSeqRecord object from the database. """ if alphabet is not None: raise ValueError("The alphabet argument is no longer supported") self.primary_id = primary_id self.adaptor = adaptor self._length = length self.start = start def __len__(self): """Return the length of the sequence.""" return self._length def __getitem__(self, index): # Seq API requirement """Return a subsequence or single letter.""" if isinstance(index, int): # Return a single letter as a string i = index if i < 0: if -i > self._length: raise IndexError(i) i = i + self._length elif i >= self._length: raise IndexError(i) return self.adaptor.get_subseq_as_string( self.primary_id, self.start + i, self.start + i + 1 ) if not isinstance(index, slice): raise TypeError("Unexpected index type") # Return the (sub)sequence as another DBSeq or Seq object # (see the Seq obect's __getitem__ method) if index.start is None: i = 0 else: i = index.start if i < 0: # Map to equavilent positive index if -i > self._length: raise IndexError(i) i = i + self._length elif i >= self._length: # Trivial case, should return empty string! i = self._length if index.stop is None: j = self._length else: j = index.stop if j < 0: # Map to equavilent positive index if -j > self._length: raise IndexError(j) j = j + self._length elif j >= self._length: j = self._length if i >= j: # Trivial case, empty string. return Seq("") elif index.step is None or index.step == 1: # Easy case - can return a DBSeq with the start and end adjusted return self.__class__( self.primary_id, self.adaptor, None, self.start + i, j - i ) else: # Tricky. Will have to create a Seq object because of the stride full = self.adaptor.get_subseq_as_string( self.primary_id, self.start + i, self.start + j ) return Seq(full[:: index.step]) def tostring(self): """Return the full sequence as a python string (DEPRECATED). You are now encouraged to use str(my_seq) instead of my_seq.tostring(). 
""" import warnings warnings.warn( "This method is obsolete; please use str(my_seq) " "instead of my_seq.tostring().", PendingDeprecationWarning, ) return self.adaptor.get_subseq_as_string( self.primary_id, self.start, self.start + self._length ) def __str__(self): """Return the full sequence as a python string.""" return self.adaptor.get_subseq_as_string( self.primary_id, self.start, self.start + self._length ) data = property(tostring, doc="Sequence as string (DEPRECATED)") def toseq(self): """Return the full sequence as a Seq object.""" # Note - the method name copies that of the MutableSeq object return Seq(str(self)) def __add__(self, other): """Add another sequence or string to this sequence. The sequence is first converted to a Seq object before the addition. The returned object is a Seq object, not a DBSeq object. """ return self.toseq() + other def __radd__(self, other): """Add another sequence or string to the left. The sequence is first converted to a Seq object before the addition. The returned object is a Seq object, not a DBSeq object. """ return other + self.toseq() def __mul__(self, other): """Multiply sequence by an integer. The sequence is first converted to a Seq object before multiplication. The returned object is a Seq object, not a DBSeq object. """ return self.toseq() * other def __rmul__(self, other): """Multiply integer by a sequence. The sequence is first converted to a Seq object before multiplication. The returned object is a Seq object, not a DBSeq object. """ return other * self.toseq() def __imul__(self, other): """Multiply sequence by integer in-place. The sequence is first converted to a Seq object before multiplication. The returned object is a Seq object, not a DBSeq object. """ return self.toseq() * other def _retrieve_seq_len(adaptor, primary_id): # The database schema ensures there will be only one matching row seqs = adaptor.execute_and_fetchall( "SELECT length FROM biosequence WHERE bioentry_id = %s", (primary_id,) ) if not seqs: return None assert len(seqs) == 1 (given_length,) = seqs[0] return int(given_length) def _retrieve_seq(adaptor, primary_id): # The database schema ensures there will be only one matching # row in the table. # If an UnknownSeq was recorded, seq will be NULL, # but length will be populated. This means length(seq) # will return None. seqs = adaptor.execute_and_fetchall( "SELECT alphabet, length, length(seq) FROM biosequence WHERE bioentry_id = %s", (primary_id,), ) if not seqs: return assert len(seqs) == 1 moltype, given_length, length = seqs[0] try: length = int(length) given_length = int(length) assert length == given_length have_seq = True except TypeError: assert length is None seqs = adaptor.execute_and_fetchall( "SELECT alphabet, length, seq FROM biosequence WHERE bioentry_id = %s", (primary_id,), ) assert len(seqs) == 1 moltype, given_length, seq = seqs[0] assert seq is None or seq == "" length = int(given_length) have_seq = False del seq del given_length if have_seq: return DBSeq(primary_id, adaptor, alphabet=None, start=0, length=int(length)) else: if moltype in ("dna", "rna"): character = "N" elif moltype == "protein": character = "X" else: character = "?" 
return UnknownSeq(length, character=character) def _retrieve_dbxrefs(adaptor, primary_id): """Retrieve the database cross references for the sequence (PRIVATE).""" _dbxrefs = [] dbxrefs = adaptor.execute_and_fetchall( "SELECT dbname, accession, version" " FROM bioentry_dbxref join dbxref using (dbxref_id)" " WHERE bioentry_id = %s" ' ORDER BY "rank"', (primary_id,), ) for dbname, accession, version in dbxrefs: if version and version != "0": v = "%s.%s" % (accession, version) else: v = accession _dbxrefs.append("%s:%s" % (dbname, v)) return _dbxrefs def _retrieve_features(adaptor, primary_id): sql = ( 'SELECT seqfeature_id, type.name, "rank"' " FROM seqfeature join term type on (type_term_id = type.term_id)" " WHERE bioentry_id = %s" ' ORDER BY "rank"' ) results = adaptor.execute_and_fetchall(sql, (primary_id,)) seq_feature_list = [] for seqfeature_id, seqfeature_type, seqfeature_rank in results: # Get qualifiers [except for db_xref which is stored separately] qvs = adaptor.execute_and_fetchall( "SELECT name, value" " FROM seqfeature_qualifier_value join term using (term_id)" " WHERE seqfeature_id = %s" ' ORDER BY "rank"', (seqfeature_id,), ) qualifiers = {} for qv_name, qv_value in qvs: qualifiers.setdefault(qv_name, []).append(qv_value) # Get db_xrefs [special case of qualifiers] qvs = adaptor.execute_and_fetchall( "SELECT dbxref.dbname, dbxref.accession" " FROM dbxref join seqfeature_dbxref using (dbxref_id)" " WHERE seqfeature_dbxref.seqfeature_id = %s" ' ORDER BY "rank"', (seqfeature_id,), ) for qv_name, qv_value in qvs: value = "%s:%s" % (qv_name, qv_value) qualifiers.setdefault("db_xref", []).append(value) # Get locations results = adaptor.execute_and_fetchall( "SELECT location_id, start_pos, end_pos, strand" " FROM location" " WHERE seqfeature_id = %s" ' ORDER BY "rank"', (seqfeature_id,), ) locations = [] # convert to Python standard form # Convert strand = 0 to strand = None # re: comment in Loader.py: # Biopython uses None when we don't know strand information but # BioSQL requires something (non null) and sets this as zero # So we'll use the strand or 0 if Biopython spits out None for location_id, start, end, strand in results: if start: start -= 1 if strand == 0: strand = None if strand not in (+1, -1, None): raise ValueError( "Invalid strand %s found in database for " "seqfeature_id %s" % (strand, seqfeature_id) ) if start is not None and end is not None and end < start: import warnings from Bio import BiopythonWarning warnings.warn( "Inverted location start/end (%i and %i) for " "seqfeature_id %s" % (start, end, seqfeature_id), BiopythonWarning, ) # For SwissProt unknown positions (?) 
if start is None: start = SeqFeature.UnknownPosition() if end is None: end = SeqFeature.UnknownPosition() locations.append((location_id, start, end, strand)) # Get possible remote reference information remote_results = adaptor.execute_and_fetchall( "SELECT location_id, dbname, accession, version" " FROM location join dbxref using (dbxref_id)" " WHERE seqfeature_id = %s", (seqfeature_id,), ) lookup = {} for location_id, dbname, accession, version in remote_results: if version and version != "0": v = "%s.%s" % (accession, version) else: v = accession # subfeature remote location db_ref are stored as a empty string # when not present if dbname == "": dbname = None lookup[location_id] = (dbname, v) feature = SeqFeature.SeqFeature(type=seqfeature_type) # Store the key as a private property feature._seqfeature_id = seqfeature_id feature.qualifiers = qualifiers if len(locations) == 0: pass elif len(locations) == 1: location_id, start, end, strand = locations[0] # See Bug 2677, we currently don't record the location_operator # For consistency with older versions Biopython, default to "". feature.location_operator = _retrieve_location_qualifier_value( adaptor, location_id ) dbname, version = lookup.get(location_id, (None, None)) feature.location = SeqFeature.FeatureLocation(start, end) feature.strand = strand feature.ref_db = dbname feature.ref = version else: locs = [] for location in locations: location_id, start, end, strand = location dbname, version = lookup.get(location_id, (None, None)) locs.append( SeqFeature.FeatureLocation( start, end, strand=strand, ref=version, ref_db=dbname ) ) # Locations are typically in biological in order (see negative # strands below), but because of remote locations for # sub-features they are not necessarily in numerical order: strands = {l.strand for l in locs} if len(strands) == 1 and -1 in strands: # Evil hack time for backwards compatibility # TODO - Check if BioPerl and (old) Biopython did the same, # we may have an existing incompatibility lurking here... locs = locs[::-1] feature.location = SeqFeature.CompoundLocation(locs, "join") # TODO - See Bug 2677 - we don't yet record location operator, # so for consistency with older versions of Biopython default # to assuming its a join. seq_feature_list.append(feature) return seq_feature_list def _retrieve_location_qualifier_value(adaptor, location_id): value = adaptor.execute_and_fetch_col0( "SELECT value FROM location_qualifier_value WHERE location_id = %s", (location_id,), ) try: return value[0] except IndexError: return "" def _retrieve_annotations(adaptor, primary_id, taxon_id): annotations = {} annotations.update(_retrieve_alphabet(adaptor, primary_id)) annotations.update(_retrieve_qualifier_value(adaptor, primary_id)) annotations.update(_retrieve_reference(adaptor, primary_id)) annotations.update(_retrieve_taxon(adaptor, primary_id, taxon_id)) annotations.update(_retrieve_comment(adaptor, primary_id)) # Convert values into strings in cases of unicode from the database. # BioSQL could eventually be expanded to be unicode aware. 
str_anns = {} for key, val in annotations.items(): if isinstance(val, list): val = [_make_unicode_into_string(x) for x in val] elif isinstance(val, str): val = str(val) str_anns[key] = val return str_anns def _make_unicode_into_string(text): if isinstance(text, str): return str(text) else: return text def _retrieve_alphabet(adaptor, primary_id): results = adaptor.execute_and_fetchall( "SELECT alphabet FROM biosequence WHERE bioentry_id = %s", (primary_id,), ) assert len(results) == 1 alphabets = results[0] assert len(alphabets) == 1 alphabet = alphabets[0] if alphabet == "dna": molecule_type = "DNA" elif alphabet == "rna": molecule_type = "RNA" elif alphabet == "protein": molecule_type = "protein" else: molecule_type = None if molecule_type is not None: return {"molecule_type": molecule_type} else: return {} def _retrieve_qualifier_value(adaptor, primary_id): qvs = adaptor.execute_and_fetchall( "SELECT name, value" " FROM bioentry_qualifier_value JOIN term USING (term_id)" " WHERE bioentry_id = %s" ' ORDER BY "rank"', (primary_id,), ) qualifiers = {} for name, value in qvs: if name == "keyword": name = "keywords" # See handling of "date" in Loader.py elif name == "date_changed": name = "date" elif name == "secondary_accession": name = "accessions" qualifiers.setdefault(name, []).append(value) return qualifiers def _retrieve_reference(adaptor, primary_id): # XXX dbxref_qualifier_value refs = adaptor.execute_and_fetchall( "SELECT start_pos, end_pos, " " location, title, authors," " dbname, accession" " FROM bioentry_reference" " JOIN reference USING (reference_id)" " LEFT JOIN dbxref USING (dbxref_id)" " WHERE bioentry_id = %s" ' ORDER BY "rank"', (primary_id,), ) references = [] for start, end, location, title, authors, dbname, accession in refs: reference = SeqFeature.Reference() # If the start/end are missing, reference.location is an empty list if (start is not None) or (end is not None): if start is not None: start -= 1 # python counting reference.location = [SeqFeature.FeatureLocation(start, end)] # Don't replace the default "" with None. if authors: reference.authors = authors if title: reference.title = title reference.journal = location if dbname == "PUBMED": reference.pubmed_id = accession elif dbname == "MEDLINE": reference.medline_id = accession references.append(reference) if references: return {"references": references} else: return {} def _retrieve_taxon(adaptor, primary_id, taxon_id): a = {} common_names = adaptor.execute_and_fetch_col0( "SELECT name FROM taxon_name WHERE taxon_id = %s" " AND name_class = 'genbank common name'", (taxon_id,), ) if common_names: a["source"] = common_names[0] scientific_names = adaptor.execute_and_fetch_col0( "SELECT name FROM taxon_name WHERE taxon_id = %s" " AND name_class = 'scientific name'", (taxon_id,), ) if scientific_names: a["organism"] = scientific_names[0] ncbi_taxids = adaptor.execute_and_fetch_col0( "SELECT ncbi_taxon_id FROM taxon WHERE taxon_id = %s", (taxon_id,) ) if ncbi_taxids and ncbi_taxids[0] and ncbi_taxids[0] != "0": a["ncbi_taxid"] = ncbi_taxids[0] # Old code used the left/right values in the taxon table to get the # taxonomy lineage in one SQL command. This was actually very slow, # and would fail if the (optional) left/right values were missing. # # The following code is based on a contribution from Eric Gibert, and # relies on the taxon table's parent_taxon_id field only (ignoring the # optional left/right values). 
This means that it has to make a # separate SQL query for each entry in the lineage, but it does still # appear to be *much* faster. See Bug 2494. taxonomy = [] while taxon_id: name, rank, parent_taxon_id = adaptor.execute_one( "SELECT taxon_name.name, taxon.node_rank, taxon.parent_taxon_id" " FROM taxon, taxon_name" " WHERE taxon.taxon_id=taxon_name.taxon_id" " AND taxon_name.name_class='scientific name'" " AND taxon.taxon_id = %s", (taxon_id,), ) if taxon_id == parent_taxon_id: # If the taxon table has been populated by the BioSQL script # load_ncbi_taxonomy.pl this is how top parent nodes are stored. # Personally, I would have used a NULL parent_taxon_id here. break taxonomy.insert(0, name) taxon_id = parent_taxon_id if taxonomy: a["taxonomy"] = taxonomy return a def _retrieve_comment(adaptor, primary_id): qvs = adaptor.execute_and_fetchall( 'SELECT comment_text FROM comment WHERE bioentry_id=%s ORDER BY "rank"', (primary_id,), ) comments = [comm[0] for comm in qvs] # Don't want to add an empty list... if comments: return {"comment": comments} else: return {} class DBSeqRecord(SeqRecord): """BioSQL equivalent of the Biopython SeqRecord object.""" def __init__(self, adaptor, primary_id): """Create a DBSeqRecord object. Arguments: - adaptor - A BioSQL.BioSeqDatabase.Adaptor object - primary_id - An internal integer ID used by BioSQL You wouldn't normally create a DBSeqRecord object yourself, this is done for you when using a BioSeqDatabase object """ self._adaptor = adaptor self._primary_id = primary_id ( self._biodatabase_id, self._taxon_id, self.name, accession, version, self._identifier, self._division, self.description, ) = self._adaptor.execute_one( "SELECT biodatabase_id, taxon_id, name, accession, version," " identifier, division, description" " FROM bioentry" " WHERE bioentry_id = %s", (self._primary_id,), ) if version and version != "0": self.id = "%s.%s" % (accession, version) else: self.id = accession # We don't yet record any per-letter-annotations in the # BioSQL database, but we should set this property up # for completeness (and the __str__ method). # We do NOT want to load the sequence from the DB here! 
length = _retrieve_seq_len(adaptor, primary_id) self._per_letter_annotations = _RestrictedDict(length=length) def __get_seq(self): if not hasattr(self, "_seq"): self._seq = _retrieve_seq(self._adaptor, self._primary_id) return self._seq def __set_seq(self, seq): # TODO - Check consistent with self._per_letter_annotations self._seq = seq def __del_seq(self): del self._seq seq = property(__get_seq, __set_seq, __del_seq, "Seq object") def __get_dbxrefs(self): if not hasattr(self, "_dbxrefs"): self._dbxrefs = _retrieve_dbxrefs(self._adaptor, self._primary_id) return self._dbxrefs def __set_dbxrefs(self, dbxrefs): self._dbxrefs = dbxrefs def __del_dbxrefs(self): del self._dbxrefs dbxrefs = property( __get_dbxrefs, __set_dbxrefs, __del_dbxrefs, "Database cross references" ) def __get_features(self): if not hasattr(self, "_features"): self._features = _retrieve_features(self._adaptor, self._primary_id) return self._features def __set_features(self, features): self._features = features def __del_features(self): del self._features features = property(__get_features, __set_features, __del_features, "Features") def __get_annotations(self): if not hasattr(self, "_annotations"): self._annotations = _retrieve_annotations( self._adaptor, self._primary_id, self._taxon_id ) if self._identifier: self._annotations["gi"] = self._identifier if self._division: self._annotations["data_file_division"] = self._division return self._annotations def __set_annotations(self, annotations): self._annotations = annotations def __del_annotations(self): del self._annotations annotations = property( __get_annotations, __set_annotations, __del_annotations, "Annotations" )
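

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the public module).
# The driver name, credentials, sub-database name and accession below are
# placeholder assumptions; substitute the details of your own BioSQL server.
# It is wrapped in a function so importing this module stays side-effect
# free -- call it manually against a populated database.
def _usage_sketch():
    from BioSQL import BioSeqDatabase

    server = BioSeqDatabase.open_database(
        driver="MySQLdb", user="biosql", passwd="secret",
        host="localhost", db="bioseqdb",
    )
    db = server["demo"]  # a BioSQL namespace / sub-database
    record = db.lookup(accession="X55053")  # returns a DBSeqRecord

    # Attributes such as .seq, .features and .annotations are fetched
    # lazily from the database on first access.
    print(record.id, len(record.seq))
    print(record.annotations.get("organism"))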
py
1a3acbb7f190014df9d21df6a97796653cb272e3
# coding: utf-8 """ Module containing various definitions of Stores. Stores are a default access pattern to data and provide various utilities """ import json import yaml from itertools import chain, groupby from socket import socket from typing import Any, Dict, Iterator, List, Optional, Tuple, Union import mongomock from monty.dev import deprecated from monty.io import zopen from monty.json import MSONable, jsanitize from monty.serialization import loadfn from pydash import get, has, set_ from pymongo import MongoClient, ReplaceOne, uri_parser from pymongo.errors import ConfigurationError, DocumentTooLarge, OperationFailure from sshtunnel import SSHTunnelForwarder from maggma.core import Sort, Store, StoreError from maggma.utils import confirm_field_index class SSHTunnel(MSONable): __TUNNELS: Dict[str, SSHTunnelForwarder] = {} def __init__( self, tunnel_server_address: str, remote_server_address: str, username: Optional[str] = None, password: Optional[str] = None, private_key: Optional[str] = None, **kwargs, ): """ Args: tunnel_server_address: string address with port for the SSH tunnel server remote_server_address: string address with port for the server to connect to username: optional username for the ssh tunnel server password: optional password for the ssh tunnel server; If a private_key is supplied this password is assumed to be the private key password private_key: ssh private key to authenticate to the tunnel server kwargs: any extra args passed to the SSHTunnelForwarder """ self.tunnel_server_address = tunnel_server_address self.remote_server_address = remote_server_address self.username = username self.password = password self.private_key = private_key self.kwargs = kwargs if remote_server_address in SSHTunnel.__TUNNELS: self.tunnel = SSHTunnel.__TUNNELS[remote_server_address] else: open_port = _find_free_port("127.0.0.1") local_bind_address = ("127.0.0.1", open_port) ssh_address, ssh_port = tunnel_server_address.split(":") ssh_port = int(ssh_port) # type: ignore remote_bind_address, remote_bind_port = remote_server_address.split(":") remote_bind_port = int(remote_bind_port) # type: ignore if private_key is not None: ssh_password = None ssh_private_key_password = password else: ssh_password = password ssh_private_key_password = None self.tunnel = SSHTunnelForwarder( ssh_address_or_host=(ssh_address, ssh_port), local_bind_address=local_bind_address, remote_bind_address=(remote_bind_address, remote_bind_port), ssh_username=username, ssh_password=ssh_password, ssh_private_key_password=ssh_private_key_password, ssh_pkey=private_key, **kwargs, ) def start(self): if not self.tunnel.is_active: self.tunnel.start() def stop(self): if self.tunnel.tunnel_is_up: self.tunnel.stop() @property def local_address(self) -> Tuple[str, int]: return self.tunnel.local_bind_address class MongoStore(Store): """ A Store that connects to a Mongo collection """ def __init__( self, database: str, collection_name: str, host: str = "localhost", port: int = 27017, username: str = "", password: str = "", ssh_tunnel: Optional[SSHTunnel] = None, safe_update: bool = False, **kwargs, ): """ Args: database: The database name collection_name: The collection name host: Hostname for the database port: TCP port to connect to username: Username for the collection password: Password to connect with safe_update: fail gracefully on DocumentTooLarge errors on update """ self.database = database self.collection_name = collection_name self.host = host self.port = port self.username = username self.password = password 
self.ssh_tunnel = ssh_tunnel self.safe_update = safe_update self._collection = None # type: Any self.kwargs = kwargs super().__init__(**kwargs) @property def name(self) -> str: """ Return a string representing this data source """ return f"mongo://{self.host}/{self.database}/{self.collection_name}" def connect(self, force_reset: bool = False): """ Connect to the source data """ if not self._collection or force_reset: if self.ssh_tunnel is None: conn = MongoClient(self.host, self.port) else: self.ssh_tunnel.start() host, port = self.ssh_tunnel.local_address conn = MongoClient(host=host, port=port) db = conn[self.database] if self.username != "": db.authenticate(self.username, self.password) self._collection = db[self.collection_name] def __hash__(self) -> int: """Hash for MongoStore""" return hash((self.database, self.collection_name, self.last_updated_field)) @classmethod def from_db_file(cls, filename: str): """ Convenience method to construct MongoStore from db_file from old QueryEngine format """ kwargs = loadfn(filename) if "collection" in kwargs: kwargs["collection_name"] = kwargs.pop("collection") # Get rid of aliases from traditional query engine db docs kwargs.pop("aliases", None) return cls(**kwargs) @classmethod def from_launchpad_file(cls, lp_file, collection_name): """ Convenience method to construct MongoStore from a launchpad file Note: A launchpad file is a special formatted yaml file used in fireworks Returns: """ with open(lp_file, 'r') as f: lp_creds = yaml.load(f, Loader=None) db_creds = lp_creds.copy() db_creds['database'] = db_creds['name'] for key in list(db_creds.keys()): if key not in ['database', 'host', 'port', 'username', 'password']: db_creds.pop(key) db_creds['collection_name'] = collection_name return cls(**db_creds) def distinct( self, field: str, criteria: Optional[Dict] = None, all_exist: bool = False ) -> List: """ Get all distinct values for a field Args: field: the field(s) to get distinct values for criteria: PyMongo filter for documents to search in """ criteria = criteria or {} try: distinct_vals = self._collection.distinct(field, criteria) except (OperationFailure, DocumentTooLarge): distinct_vals = [ d["_id"] for d in self._collection.aggregate( [{"$match": criteria}, {"$group": {"_id": f"${field}"}}] ) ] if all(isinstance(d, list) for d in filter(None, distinct_vals)): # type: ignore distinct_vals = list(chain.from_iterable(filter(None, distinct_vals))) return distinct_vals if distinct_vals is not None else [] def groupby( self, keys: Union[List[str], str], criteria: Optional[Dict] = None, properties: Union[Dict, List, None] = None, sort: Optional[Dict[str, Union[Sort, int]]] = None, skip: int = 0, limit: int = 0, ) -> Iterator[Tuple[Dict, List[Dict]]]: """ Simple grouping function that will group documents by keys. Args: keys: fields to group documents criteria: PyMongo filter for documents to search in properties: properties to return in grouped documents sort: Dictionary of sort order for fields. Keys are field names and values are 1 for ascending or -1 for descending. 
skip: number documents to skip limit: limit on total number of documents returned Returns: generator returning tuples of (key, list of docs) """ pipeline = [] if isinstance(keys, str): keys = [keys] if properties is None: properties = [] if isinstance(properties, dict): properties = list(properties.keys()) if criteria is not None: pipeline.append({"$match": criteria}) if len(properties) > 0: pipeline.append({"$project": {p: 1 for p in properties + keys}}) alpha = "abcdefghijklmnopqrstuvwxyz" group_id = {letter: f"${key}" for letter, key in zip(alpha, keys)} pipeline.append({"$group": {"_id": group_id, "docs": {"$push": "$$ROOT"}}}) for d in self._collection.aggregate(pipeline, allowDiskUse=True): id_doc = {} # type: Dict[str,Any] for letter, key in group_id.items(): if has(d["_id"], letter): set_(id_doc, key[1:], d["_id"][letter]) yield (id_doc, d["docs"]) @classmethod def from_collection(cls, collection): """ Generates a MongoStore from a pymongo collection object This is not a fully safe operation as it gives dummy information to the MongoStore As a result, this will not serialize and can not reset its connection Args: collection: the PyMongo collection to create a MongoStore around """ # TODO: How do we make this safer? coll_name = collection.name db_name = collection.database.name store = cls(db_name, coll_name) store._collection = collection return store @property # type: ignore @deprecated(message="This will be removed in the future") def collection(self): """Property referring to underlying pymongo collection""" if self._collection is None: raise StoreError("Must connect Mongo-like store before attemping to use it") return self._collection def count(self, criteria: Optional[Dict] = None) -> int: """ Counts the number of documents matching the query criteria Args: criteria: PyMongo filter for documents to count in """ criteria = criteria if criteria else {} return self._collection.find(filter=criteria).count() def query( self, criteria: Optional[Dict] = None, properties: Union[Dict, List, None] = None, sort: Optional[Dict[str, Union[Sort, int]]] = None, skip: int = 0, limit: int = 0, ) -> Iterator[Dict]: """ Queries the Store for a set of documents Args: criteria: PyMongo filter for documents to search in properties: properties to return in grouped documents sort: Dictionary of sort order for fields. Keys are field names and values are 1 for ascending or -1 for descending. 
skip: number documents to skip limit: limit on total number of documents returned """ if isinstance(properties, list): properties = {p: 1 for p in properties} sort_list = ( [ (k, Sort(v).value) if isinstance(v, int) else (k, v.value) for k, v in sort.items() ] if sort else None ) for d in self._collection.find( filter=criteria, projection=properties, skip=skip, limit=limit, sort=sort_list, ): yield d def ensure_index(self, key: str, unique: Optional[bool] = False) -> bool: """ Tries to create an index and return true if it suceeded Args: key: single key to index unique: Whether or not this index contains only unique keys Returns: bool indicating if the index exists/was created """ if confirm_field_index(self._collection, key): return True else: try: self._collection.create_index(key, unique=unique, background=True) return True except Exception: return False def update(self, docs: Union[List[Dict], Dict], key: Union[List, str, None] = None): """ Update documents into the Store Args: docs: the document or list of documents to update key: field name(s) to determine uniqueness for a document, can be a list of multiple fields, a single field, or None if the Store's key field is to be used """ requests = [] if not isinstance(docs, list): docs = [docs] for d in docs: d = jsanitize(d, allow_bson=True) # document-level validation is optional validates = True if self.validator: validates = self.validator.is_valid(d) if not validates: if self.validator.strict: raise ValueError(self.validator.validation_errors(d)) else: self.logger.error(self.validator.validation_errors(d)) if validates: key = key or self.key if isinstance(key, list): search_doc = {k: d[k] for k in key} else: search_doc = {key: d[key]} requests.append(ReplaceOne(search_doc, d, upsert=True)) if len(requests) > 0: try: self._collection.bulk_write(requests, ordered=False) except (OperationFailure, DocumentTooLarge) as e: if self.safe_update: for req in requests: req._filter try: self._collection.bulk_write([req], ordered=False) except (OperationFailure, DocumentTooLarge): self.logger.error( f"Could not upload document for {req._filter} as it was too large for Mongo" ) else: raise e def remove_docs(self, criteria: Dict): """ Remove docs matching the query dictionary Args: criteria: query dictionary to match """ self._collection.delete_many(filter=criteria) def close(self): """Close up all collections""" self._collection.database.client.close() if self.ssh_tunnel is not None: self.ssh_tunnel.stop() def __eq__(self, other: object) -> bool: """ Check equality for MongoStore other: other mongostore to compare with """ if not isinstance(other, MongoStore): return False fields = ["database", "collection_name", "host", "port", "last_updated_field"] return all(getattr(self, f) == getattr(other, f) for f in fields) class MongoURIStore(MongoStore): """ A Store that connects to a Mongo collection via a URI This is expected to be a special mongodb+srv:// URIs that include client parameters via TXT records """ def __init__( self, uri: str, collection_name: str, database: str = None, ssh_tunnel: Optional[SSHTunnel] = None, **kwargs, ): """ Args: uri: MongoDB+SRV URI database: database to connect to collection_name: The collection name """ self.uri = uri self.ssh_tunnel = ssh_tunnel # parse the dbname from the uri if database is None: d_uri = uri_parser.parse_uri(uri) if d_uri["database"] is None: raise ConfigurationError( "If database name is not supplied, a database must be set in the uri" ) self.database = d_uri["database"] else: self.database = 
database self.collection_name = collection_name self.kwargs = kwargs self._collection = None super(MongoStore, self).__init__(**kwargs) # lgtm @property def name(self) -> str: """ Return a string representing this data source """ # TODO: This is not very safe since it exposes the username/password info return self.uri def connect(self, force_reset: bool = False): """ Connect to the source data """ if not self._collection or force_reset: conn = MongoClient(self.uri) db = conn[self.database] self._collection = db[self.collection_name] class MemoryStore(MongoStore): """ An in-memory Store that functions similarly to a MongoStore """ def __init__(self, collection_name: str = "memory_db", **kwargs): """ Initializes the Memory Store Args: collection_name: name for the collection in memory """ self.collection_name = collection_name self._collection = None self.ssh_tunnel = None # This is to fix issues with the tunnel on close self.kwargs = kwargs super(MongoStore, self).__init__(**kwargs) # noqa def connect(self, force_reset: bool = False): """ Connect to the source data """ if not self._collection or force_reset: self._collection = mongomock.MongoClient().db[self.name] @property def name(self): """Name for the store""" return f"mem://{self.collection_name}" def __hash__(self): """Hash for the store""" return hash((self.name, self.last_updated_field)) def groupby( self, keys: Union[List[str], str], criteria: Optional[Dict] = None, properties: Union[Dict, List, None] = None, sort: Optional[Dict[str, Union[Sort, int]]] = None, skip: int = 0, limit: int = 0, ) -> Iterator[Tuple[Dict, List[Dict]]]: """ Simple grouping function that will group documents by keys. Args: keys: fields to group documents criteria: PyMongo filter for documents to search in properties: properties to return in grouped documents sort: Dictionary of sort order for fields. Keys are field names and values are 1 for ascending or -1 for descending. 
skip: number documents to skip limit: limit on total number of documents returned Returns: generator returning tuples of (key, list of elemnts) """ keys = keys if isinstance(keys, list) else [keys] data = [ doc for doc in self.query(properties=keys, criteria=criteria) if all(has(doc, k) for k in keys) ] def grouping_keys(doc): return tuple(get(doc, k) for k in keys) for vals, group in groupby(sorted(data, key=grouping_keys), key=grouping_keys): doc = {} # type: Dict[Any,Any] for k, v in zip(keys, vals): set_(doc, k, v) yield doc, list(group) def __eq__(self, other: object) -> bool: """ Check equality for MemoryStore other: other MemoryStore to compare with """ if not isinstance(other, MemoryStore): return False fields = ["collection_name", "last_updated_field"] return all(getattr(self, f) == getattr(other, f) for f in fields) class JSONStore(MemoryStore): """ A Store for access to a single or multiple JSON files """ def __init__(self, paths: Union[str, List[str]], **kwargs): """ Args: paths: paths for json files to turn into a Store """ paths = paths if isinstance(paths, (list, tuple)) else [paths] self.paths = paths self.kwargs = kwargs super().__init__(collection_name="collection", **kwargs) def connect(self, force_reset=False): """ Loads the files into the collection in memory """ super().connect(force_reset=force_reset) for path in self.paths: with zopen(path) as f: data = f.read() data = data.decode() if isinstance(data, bytes) else data objects = json.loads(data) objects = [objects] if not isinstance(objects, list) else objects self.update(objects) def __hash__(self): return hash((*self.paths, self.last_updated_field)) def __eq__(self, other: object) -> bool: """ Check equality for JSONStore Args: other: other JSONStore to compare with """ if not isinstance(other, JSONStore): return False fields = ["paths", "last_updated_field"] return all(getattr(self, f) == getattr(other, f) for f in fields) def _find_free_port(address="0.0.0.0"): s = socket() s.bind((address, 0)) # Bind to a free port provided by the host. return s.getsockname()[1] # Return the port number assigned.
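

# ---------------------------------------------------------------------------
# Minimal usage sketch (not exercised anywhere in maggma itself).  MemoryStore
# is backed by mongomock, so this should run without a MongoDB server; the
# "task_id" key and the documents are made-up illustration data.
def _memory_store_sketch():
    store = MemoryStore("demo", key="task_id")
    store.connect()
    store.update(
        [
            {"task_id": 1, "formula": "SiO2", "energy": -1.2},
            {"task_id": 2, "formula": "GaAs", "energy": -0.7},
        ],
        key="task_id",
    )
    print(list(store.query(criteria={"task_id": 1})))  # the SiO2 document
    for group_key, docs in store.groupby("formula"):
        print(group_key, len(docs))
    store.close()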
py
1a3acc859e344c2970c2583cda25348b824fa900
# Copyright 2021 The PyBigQuery Authors # # Use of this source code is governed by an MIT-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/MIT. from unittest import mock import google.auth import google.auth.credentials from google.oauth2 import service_account import pytest class AnonymousCredentialsWithProject(google.auth.credentials.AnonymousCredentials): """Fake credentials to trick isinstance""" def __init__(self, project): super().__init__() self.project_id = project def with_scopes(self, scopes): return self @pytest.fixture(scope="session") def module_under_test(): from pybigquery import _helpers return _helpers def test_create_bigquery_client_with_credentials_path(monkeypatch, module_under_test): mock_service_account = mock.create_autospec(service_account.Credentials) mock_service_account.from_service_account_file.return_value = AnonymousCredentialsWithProject( "service-account-project" ) monkeypatch.setattr(service_account, "Credentials", mock_service_account) bqclient = module_under_test.create_bigquery_client( credentials_path="path/to/key.json", ) assert bqclient.project == "service-account-project" def test_create_bigquery_client_with_credentials_path_respects_project( monkeypatch, module_under_test ): """Test that project_id is used, even when there is a default project. https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48 """ mock_service_account = mock.create_autospec(service_account.Credentials) mock_service_account.from_service_account_file.return_value = AnonymousCredentialsWithProject( "service-account-project" ) monkeypatch.setattr(service_account, "Credentials", mock_service_account) bqclient = module_under_test.create_bigquery_client( credentials_path="path/to/key.json", project_id="connection-url-project", ) assert bqclient.project == "connection-url-project" def test_create_bigquery_client_with_credentials_info(monkeypatch, module_under_test): mock_service_account = mock.create_autospec(service_account.Credentials) mock_service_account.from_service_account_info.return_value = AnonymousCredentialsWithProject( "service-account-project" ) monkeypatch.setattr(service_account, "Credentials", mock_service_account) bqclient = module_under_test.create_bigquery_client( credentials_info={ "type": "service_account", "project_id": "service-account-project", }, ) assert bqclient.project == "service-account-project" def test_create_bigquery_client_with_credentials_info_respects_project( monkeypatch, module_under_test ): """Test that project_id is used, even when there is a default project. 
https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48 """ mock_service_account = mock.create_autospec(service_account.Credentials) mock_service_account.from_service_account_info.return_value = AnonymousCredentialsWithProject( "service-account-project" ) monkeypatch.setattr(service_account, "Credentials", mock_service_account) bqclient = module_under_test.create_bigquery_client( credentials_info={ "type": "service_account", "project_id": "service-account-project", }, project_id="connection-url-project", ) assert bqclient.project == "connection-url-project" def test_create_bigquery_client_with_default_credentials( monkeypatch, module_under_test ): def mock_default_credentials(*args, **kwargs): return (google.auth.credentials.AnonymousCredentials(), "default-project") monkeypatch.setattr(google.auth, "default", mock_default_credentials) bqclient = module_under_test.create_bigquery_client() assert bqclient.project == "default-project" def test_create_bigquery_client_with_default_credentials_respects_project( monkeypatch, module_under_test ): """Test that project_id is used, even when there is a default project. https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48 """ def mock_default_credentials(*args, **kwargs): return (google.auth.credentials.AnonymousCredentials(), "default-project") monkeypatch.setattr(google.auth, "default", mock_default_credentials) bqclient = module_under_test.create_bigquery_client( project_id="connection-url-project", ) assert bqclient.project == "connection-url-project"
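

# Compact, illustrative variant of the precedence checks above.  It only
# reuses the fixtures and mock pattern already present in this module: an
# explicit project_id should win over the service-account project for both
# credential entry points.
@pytest.mark.parametrize(
    "factory_name", ["from_service_account_file", "from_service_account_info"]
)
def test_create_bigquery_client_project_precedence(
    monkeypatch, module_under_test, factory_name
):
    mock_service_account = mock.create_autospec(service_account.Credentials)
    getattr(mock_service_account, factory_name).return_value = (
        AnonymousCredentialsWithProject("service-account-project")
    )
    monkeypatch.setattr(service_account, "Credentials", mock_service_account)

    if factory_name == "from_service_account_file":
        kwargs = {"credentials_path": "path/to/key.json"}
    else:
        kwargs = {
            "credentials_info": {
                "type": "service_account",
                "project_id": "service-account-project",
            }
        }

    bqclient = module_under_test.create_bigquery_client(
        project_id="connection-url-project", **kwargs
    )

    assert bqclient.project == "connection-url-project"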
py
1a3acde4d50c9055caf5617ce0749c9be39aeef1
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from django import forms from django.contrib.auth import get_user_model from django.forms.utils import ErrorDict from django.utils.translation import ugettext_lazy as _ from shop.conf import app_settings as shop_settings from shop.modifiers.pool import cart_modifiers_pool from shopit.conf import app_settings from shopit.forms.account import AccountDetailsForm, CleanEmailMixin from shopit.models.address import ISO_3166_CODES, BillingAddress, ShippingAddress from shopit.models.cart import CartDiscountCode from shopit.models.customer import Customer from shopit.models.modifier import DiscountCode from shopit.utils import get_error_message as em class CartDiscountCodeForm(forms.ModelForm): """ Form that handles entering a cart modifier code. """ _discount_code = None class Meta: model = CartDiscountCode fields = ['code'] def __init__(self, *args, **kwargs): self.cart = kwargs.pop('cart') kwargs['instance'] = CartDiscountCode(cart=self.cart) super(CartDiscountCodeForm, self).__init__(*args, **kwargs) self.fields['code'].required = False self.fields['code'].label = _('Discount code') def clean_code(self): code = self.cleaned_data.get('code', None) if code: cart_codes = self.cart.get_discount_codes().values_list('code', flat=True) if code in cart_codes: raise forms.ValidationError(em('cart_discount_code_exists')) try: dc = DiscountCode.objects.valid().get(code=code) except DiscountCode.DoesNotExist: raise forms.ValidationError(em('cart_discount_code_invalid')) if dc.customer and code not in self.cart.customer.get_discount_codes().values_list('code', flat=True): raise forms.ValidationError(em('cart_discount_code_wrong_customer')) self._discount_code = dc return code def save(self, commit=True): if self._discount_code is not None: self._discount_code.use() # increment `num_uses` field on DiscountCode. return super(CartDiscountCodeForm, self).save(commit) class CheckoutFormMixin(object): """ Checkout form mixin ensures request and cart are passed in. 
""" def __init__(self, *args, **kwargs): self.request = kwargs.pop('request') self.cart = kwargs.pop('cart') super(CheckoutFormMixin, self).__init__(*args, **kwargs) class CustomerForm(CheckoutFormMixin, AccountDetailsForm): def __init__(self, *args, **kwargs): self.cart = kwargs.pop('cart') return AccountDetailsForm.__init__(self, *args, **kwargs) def save(self, commit=True): self.instance.recognize_as_registered() return super(CustomerForm, self).save(commit) class GuestForm(CheckoutFormMixin, CleanEmailMixin, forms.ModelForm): email = forms.EmailField(label=_('Email address')) phone_number = forms.CharField(label=_('Phone number')) class Meta: model = get_user_model() fields = ['email'] def __init__(self, *args, **kwargs): super(GuestForm, self).__init__(*args, **kwargs) self.customer = Customer.objects.get_from_request(self.request) self.instance = self.customer.user self.fields['email'].initial = self.instance.email self.fields['phone_number'].initial = self.customer.phone_number self.fields['phone_number'].required = app_settings.PHONE_NUMBER_REQUIRED def save(self, commit=True): self.customer.recognize_as_guest() self.instance.is_active = shop_settings.SHOP_GUEST_IS_ACTIVE_USER if self.instance.is_active: password = get_user_model().objects.make_random_password(length=30) self.instance.set_password(password) self.customer.phone_number = self.cleaned_data.get('phone_number', '') self.customer.save() return super(GuestForm, self).save(commit) class AddressForm(CheckoutFormMixin, forms.ModelForm): priority = forms.IntegerField( required=False, widget=forms.HiddenInput, ) existant = forms.ModelChoiceField( required=False, queryset=None, label=_('Use existant address'), ) # Field decides if a primary address should be used instead. # Primary address is set to either 'shipping' or 'billing' using `PRIMARY_ADDRESS` setting. use_primary_address = forms.BooleanField( required=False, initial=True, ) class Meta: exclude = ['customer'] def __init__(self, *args, **kwargs): self.field_order = ['existant'] # Place `existant` field at the top. super(AddressForm, self).__init__(*args, **kwargs) self.customer = Customer.objects.get_from_request(self.request) # Set existant addresses choices. addresses = self.Meta.model.objects.filter(customer=self.customer).order_by('-priority') self.fields['existant'].queryset = addresses if not addresses.exists(): self.fields['existant'].widget = forms.HiddenInput() # Set country choices based on `ADDRESS_COUNTRIES` setting. if app_settings.ADDRESS_COUNTRIES: countries = [('', '---------')] + [x for x in ISO_3166_CODES if x in app_settings.ADDRESS_COUNTRIES] self.fields['country'].widget = forms.Select(choices=countries) self.fields['country'].choices = countries if self.is_primary: self.fields.pop('use_primary_address') # remove field from primary address. else: self.fields['use_primary_address'].initial = \ getattr(self.cart, '%s_address' % self.address_type, None) is None if hasattr(self, 'use_primary_address_label'): self.fields['use_primary_address'].label = self.use_primary_address_label # If current address is set to the cart, use it as existant one. 
cart_address = getattr(self.cart, '%s_address' % self.address_type, None) if cart_address: self.fields['existant'].initial = cart_address for fname in [f.name for f in cart_address._meta.get_fields() if f.name in self.fields]: self.fields[fname].initial = getattr(cart_address, fname, '') def full_clean(self): super(AddressForm, self).full_clean() if not self.is_primary: if self.is_bound and self['use_primary_address'].value(): self._errors = ErrorDict() def is_valid(self): if not self.is_primary: return self['use_primary_address'].value() or super(AddressForm, self).is_valid() return super(AddressForm, self).is_valid() def clean(self): existant = self.cleaned_data['existant'] if existant: self.instance = existant # Set existant as an instance if selected. self.cleaned_data['priority'] = existant.priority # Populate missing fields in `cleaned_data` with existant data and skip validation. for field in [x for x in self.fields if x not in self.cleaned_data]: self.cleaned_data[field] = getattr(existant, field) del self._errors[field] else: self.cleaned_data['priority'] = self.Meta.model.objects.get_max_priority(self.customer) + 1 return super(AddressForm, self).clean() def save(self, commit=True): if self.is_primary or not self['use_primary_address'].value(): instance = super(AddressForm, self).save(commit=False) instance.customer = self.customer instance.priority = self.cleaned_data['priority'] instance.save() return instance @property def address_type(self): return self.Meta.model.__name__.lower().rstrip('address') @property def is_primary(self): return app_settings.PRIMARY_ADDRESS == self.address_type class ShippingAddressForm(AddressForm): use_primary_address_label = _('Use billing address for shipping') class Meta(AddressForm.Meta): model = ShippingAddress class BillingAddressForm(AddressForm): use_primary_address_label = _('Use shipping address for billing') class Meta(AddressForm.Meta): model = BillingAddress class PaymentMethodForm(CheckoutFormMixin, forms.Form): payment_modifier = forms.ChoiceField( label=_('Payment method'), widget=forms.RadioSelect, ) def __init__(self, *args, **kwargs): super(PaymentMethodForm, self).__init__(*args, **kwargs) choices = [x.get_choice() for x in cart_modifiers_pool.get_payment_modifiers() if not x.is_disabled(self.cart)] self.fields['payment_modifier'].choices = choices if len(choices) == 1: self.fields['payment_modifier'].initial = choices[0][0] class DeliveryMethodForm(CheckoutFormMixin, forms.Form): shipping_modifier = forms.ChoiceField( label=_('Delivery method'), widget=forms.RadioSelect, ) def __init__(self, *args, **kwargs): super(DeliveryMethodForm, self).__init__(*args, **kwargs) choices = [x.get_choice() for x in cart_modifiers_pool.get_shipping_modifiers() if not x.is_disabled(self.cart)] self.fields['shipping_modifier'].choices = choices if len(choices) == 1: self.fields['shipping_modifier'].initial = choices[0][0] class ExtraAnnotationForm(CheckoutFormMixin, forms.Form): annotation = forms.CharField( label=_('Extra annotation for this order'), required=False, widget=forms.Textarea, ) class AcceptConditionForm(CheckoutFormMixin, forms.Form): accept = forms.BooleanField( label=_('Accept'), required=True, widget=forms.CheckboxInput, )
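

# ---------------------------------------------------------------------------
# Rough usage sketch for CartDiscountCodeForm (illustration only).  The view
# name and the redirect target are invented; only the ``cart`` keyword and the
# CartModel lookup mirror what the form itself expects.
def _cart_discount_code_view_sketch(request):
    from django.shortcuts import redirect
    from shop.models.cart import CartModel

    cart = CartModel.objects.get_from_request(request)
    form = CartDiscountCodeForm(data=request.POST or None, cart=cart)
    if request.method == 'POST' and form.is_valid():
        form.save()  # stores the code on the cart and bumps its usage counter
    return redirect('shopit-cart-detail')  # hypothetical url name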
py
1a3ace44b41d1416cc900579edfe928a9212da68
from .output import Output import gevent import gevent.monkey gevent.monkey.patch_socket() import urllib2 import json class Elasticsearch(Output): """Outputs to an elasticsearch index. :param string host: elasticsearch host :param integer port: elasticsearch port :param string index: (required) elasticsearch index. This can be formatted by fields in the event. :param string type: (required) elasticsearch type. This can be formatted by fields in the event. Example configuration for kibana:: Mutate(rename={'@timestamp': 'timestamp', '@message': 'message'}) Elasticsearch(index='logstash-{@timestamp:%Y.%m.%d}', type='event') """ RETRIES = 10 def __init__(self, index, type, host='localhost', port=9200): super(Elasticsearch, self).__init__() self.host = host self.port = port self.index = index self.type = type def process(self, event): data = event.to_json() index = event.format(self.index) itype = event.format(self.type) if not index: raise ValueError("index is empty") if not itype: raise ValueError("type is empty") url = 'http://%s:%s/%s/%s/' % (self.host, self.port, index, itype) success = False delay = 1.0 for retry in xrange(self.RETRIES): try: res = urllib2.urlopen(url, data=data) # 200 response indicates all is well success = True result = json.load(res) break except urllib2.HTTPError as ex: if ex.getcode() == 400: # Bad Request - do not retry self.logger.error("Bad request: %s, not retrying" % (ex,)) break else: delay *= 2.0 self.logger.warn('Unable to index: %s, retrying in %ds' % (ex, delay)) gevent.sleep(delay) except urllib2.URLError as ex: delay *= 2.0 self.logger.warn('Unable to index: %s, retrying in %ds' % (ex, delay)) gevent.sleep(delay) if success: self.logger.debug('Indexed to elasticsearch: index:%s type:%s id:%s' % (index, itype, result['_id']))
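

# ---------------------------------------------------------------------------
# Illustrative wiring sketch.  Outputs are normally constructed from pipeline
# configuration, so the stub event below (exposing only the to_json()/format()
# methods this class relies on) is an assumption made purely to show the call
# sequence; it is not the real event API, and process() needs a reachable
# elasticsearch node to succeed.
class _StubEvent(object):
    def __init__(self, fields):
        self.fields = fields

    def to_json(self):
        return json.dumps(self.fields)

    def format(self, template):
        return template.format(**self.fields)


def _example_usage():
    output = Elasticsearch(index='logstash-{day}', type='event',
                           host='localhost', port=9200)
    output.process(_StubEvent({'day': '2015.01.01', 'message': 'hello'}))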
py
1a3ace9b481837f6e89bb6fffd310e1cbdfa7270
#!/usr/bin/env python3 # Copyright (c) 2017-2019 The Rhombus Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import json from test_framework.test_rhombus import RhombusTestFramework, isclose, connect_nodes_bi class ExtKeyTest(RhombusTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [ ['-debug','-reservebalance=10000000'] for i in range(self.num_nodes)] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self, split=False): self.add_nodes(self.num_nodes, extra_args=self.extra_args) self.start_nodes() connect_nodes_bi(self.nodes, 0, 1) connect_nodes_bi(self.nodes, 0, 2) self.sync_all() def run_test(self): node = self.nodes[0] node1 = self.nodes[1] ro = node.extkeyimportmaster('abandon baby cabbage dad eager fabric gadget habit ice kangaroo lab absorb') assert(ro['account_id'] == 'aaaZf2qnNr5T7PWRmqgmusuu5ACnBcX2ev') assert(node.getwalletinfo()['total_balance'] == 100000) # Start staking node.walletsettings('stakelimit', {'height':1}) node.reservebalance(False) assert(self.wait_for_height(node, 1)) # stop staking node.reservebalance(True, 10000000) node1.reservebalance(True, 10000000) ro = node1.extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate') assert(ro['account_id'] == 'ahL1QdHhzNCtZWJzv36ScfPipJP1cUzAD8') extAddrTo = node1.getnewextaddress('test label') assert(extAddrTo == 'pparszNYZ1cpWxnNieFqHCV2rtXmG74a4WAXHHhXaRATzzU6kMixjy1rXDM1UM4LVgkXRpLNM1rQNvkgLf7kUeMXiyaBMK8aSR3td4b4cX4epnHF') ro = node1.filteraddresses() assert(len(ro) == 1) assert(ro[0]['label'] == 'test label') ro = node1.getaddressinfo(extAddrTo) assert(ro['ismine'] == True) assert(ro['isextkey'] == True) ro = node1.dumpprivkey(extAddrTo) assert(ro == 'xparFnnG7xJkEekTjWGumcEY1BKgryY4txW5Ce56KQPBJG7u3cNsUHxGgjVwHGEaxUGDAjT4SXv7fkWkp4TFaFHjaoZVh8Zricnwz3DjAxtqtmi') txnHash = node.sendtoaddress(extAddrTo, 10) ro = node.getmempoolentry(txnHash) assert(ro['height'] == 1) # start staking node.walletsettings('stakelimit', {'height':2}) node.reservebalance(False) assert(self.wait_for_height(node, 2)) # stop staking ro = node.reservebalance(True, 10000000) ro = node1.listtransactions() assert(len(ro) == 1) assert(ro[0]['address'] == 'pkGv5xgviEAEjwpRPeEt8c9cvraw2umKYo') assert(ro[0]['amount'] == 10) ro = node1.getwalletinfo() assert(ro['total_balance'] == 10) block2_hash = node.getblockhash(2) ro = node.getblock(block2_hash) assert(txnHash in ro['tx']) txnHash2 = node.sendtoaddress(extAddrTo, 20, '', '', False, 'narration test') assert(self.wait_for_mempool(node1, txnHash2)) ro = node1.listtransactions() assert(len(ro) == 2) assert(ro[1]['address'] == 'pbo5e7tsLJBdUcCWteTTkGBxjW8Xy12o1V') assert(ro[1]['amount'] == 20) assert('narration test' in ro[1].values()) ro = node.listtransactions() assert('narration test' in ro[-1].values()) extAddrTo0 = node.getnewextaddress() txnHashes = [] for k in range(24): v = round(0.01 * float(k+1), 5) node1.syncwithvalidationinterfacequeue() txnHash = node1.sendtoaddress(extAddrTo0, v, '', '', False) txnHashes.append(txnHash) for txnHash in txnHashes: assert(self.wait_for_mempool(node, txnHash)) ro = node.listtransactions('*', 24) assert(len(ro) == 24) assert[isclose(ro[0]['amount'], 0.01)] assert[isclose(ro[23]['amount'], 0.24)] assert[ro[23]['address'] == 
'pm23xKs3gy6AhZZ7JZe61Rn1m8VB83P49d'] # start staking node.walletsettings('stakelimit', {'height':3}) node.reservebalance(False) assert(self.wait_for_height(node, 3)) block3_hash = node.getblockhash(3) ro = node.getblock(block3_hash) for txnHash in txnHashes: assert(txnHash in ro['tx']) # Test bech32 encoding ek_b32 = 'tpep1q3ehtcetqqqqqqesj04mypkmhnly5rktqmcpmjuq09lyevcsjxrgra6x8trd52vp2vpsk6kf86v3npg6x66ymrn5yrqnclxtqrlfdlw3j4f0309dhxct8kc68paxt' assert(node.getnewextaddress('lbl_b32', '', True) == ek_b32) assert(ek_b32 in json.dumps(node.filteraddresses())) if __name__ == '__main__': ExtKeyTest().main()
py
1a3acf27e401626313794d1d0b0080590e541c33
#!/usr/bin/env python """ lgc/main.py Note to program performers: - parallel_pr_nibble produces the same results as ligra's `apps/localAlg/ACL-Sync-Local-Opt.C` - ista produces the same results as LocalGraphClustering's `ista_dinput_dense` method """ import os import sys import argparse import numpy as np from time import time from tqdm import tqdm from scipy import sparse from scipy.io import mmread from scipy.stats import spearmanr # -- # Parallel PR-Nibble def parallel_pr_nibble(seeds, degrees, num_nodes, adj_indices, adj_indptr, alpha, epsilon): out = [] for seed in tqdm(seeds): p = np.zeros(num_nodes) r = np.zeros(num_nodes) r[seed] = 1 frontier = np.array([seed]) while True: if len(frontier) == 0: break r_prime = r.copy() for node_idx in frontier: p[node_idx] += (2 * alpha) / (1 + alpha) * r[node_idx] r_prime[node_idx] = 0 for src_idx in frontier: neighbors = adj_indices[adj_indptr[src_idx]:adj_indptr[src_idx + 1]] for dst_idx in neighbors: update = ((1 - alpha) / (1 + alpha)) * r[src_idx] / degrees[src_idx] r_prime[dst_idx] += update r = r_prime frontier = np.where((r >= degrees * epsilon) & (degrees > 0))[0] out.append(p) return np.column_stack(out) # -- # Run def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--num-seeds', type=int, default=50) parser.add_argument('--alpha', type=float, default=0.15) parser.add_argument('--pnib-epsilon', type=float, default=1e-6) args = parser.parse_args() # !! In order to check accuracy, you _must_ use these parameters !! assert args.num_seeds == 50 assert args.alpha == 0.15 assert args.pnib_epsilon == 1e-6 return args args = parse_args() adj = mmread('data/jhu.mtx').tocsr() degrees = np.asarray(adj.sum(axis=-1)).squeeze().astype(int) num_nodes = adj.shape[0] adj_indices = adj.indices adj_indptr = adj.indptr pnib_seeds = np.array(range(args.num_seeds)) alpha = args.alpha pnib_epsilon = args.pnib_epsilon t = time() pnib_scores = parallel_pr_nibble(pnib_seeds, degrees, num_nodes, adj_indices, adj_indptr, alpha=alpha, epsilon=pnib_epsilon) t2 = time() assert pnib_scores.shape[0] == adj.shape[0] assert pnib_scores.shape[1] == len(pnib_seeds) pnib_elapsed = time() - t print("[Nibble Elapsed Time]: ", (t2 - t)) os.makedirs('results', exist_ok=True) np.savetxt('results/pnib_score.txt', pnib_scores) open('results/pnib_elapsed', 'w').write(str(pnib_elapsed))
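

# ---------------------------------------------------------------------------
# Self-contained sanity check on a toy graph (not used by the benchmark run
# above).  The 4-node path graph and the alpha/epsilon values are arbitrary
# illustration choices; call this manually when you want a quick smoke test
# that does not require data/jhu.mtx.
def _toy_example():
    # Undirected path graph 0-1-2-3 in CSR form
    toy = sparse.csr_matrix(np.array([
        [0, 1, 0, 0],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [0, 0, 1, 0],
    ]))
    toy_degrees = np.asarray(toy.sum(axis=-1)).squeeze().astype(int)

    scores = parallel_pr_nibble(
        seeds=np.array([0]),
        degrees=toy_degrees,
        num_nodes=toy.shape[0],
        adj_indices=toy.indices,
        adj_indptr=toy.indptr,
        alpha=0.15,
        epsilon=1e-6,
    )
    # Mass concentrates near the seed and decays along the path
    print(scores[:, 0])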
py
1a3acf452f7246ca31f73dd49dd58b9c15210312
from .mikuapi import Miku def setup(bot): bot.add_cog(Miku(bot))
py
1a3acf6915e3705e9938f6ec20f9a216cce4447a
# Ex. 095 +=
# jogador = {'Nome': 'Joelson', 'Gols': [2, 1, 0, 0, 3], 'Total': 6}
jogador = {}
jogadores, gols = [], []
posGol = 0
Blue, Normal, Color, Red = "\033[34m", "\033[m", "\033[36m", "\033[31m"

while True:
    jogador['Nome'] = str(input("Player name: ")).capitalize()
    partidas = int(input(f"How many matches did {jogador['Nome']} play? "))
    for i in range(1, partidas + 1):
        gol = int(input(f"  How many goals in match {i}? "))
        gols.append(gol)
    jogador['Gols'] = gols[:]  # Copy of 'gols'
    jogador['Total'] = sum(gols)  # sum() adds up any iterable - in this case, a list
    jogadores.append(jogador.copy())  # Append a copy of the current player to the list of players
    gols.clear()  # Reset the goals list

    resp = str(input("Do you want to continue [y/n]? ")).lower()
    if "y" in resp:
        print()
    else:
        break

print("-" * 50)

# Overall scoreboard
print(f"{Blue}No. Name          Goals           Total{Normal}")
# jogadores = [{'Nome': 'Joel', 'Gols': [0, 0, 3], 'Total': 3}, {'Nome': 'Tody', 'Gols': [2, 2, 0], 'Total': 4}]
for i in range(len(jogadores)):
    print(f"{i:<2}  {jogadores[i]['Nome']:<13} {str(jogadores[i]['Gols']):<15} {jogadores[i]['Total']:<5}")
print("-" * 50)

while True:
    quem = int(input("Whose record do you want to see [-1 to quit]? "))
    if quem == -1:  # Quit
        break
    elif quem >= len(jogadores) or quem < 0:  # Invalid
        print(f"{Red}That player does not exist, try again!{Normal}")
    else:  # Valid
        print(f"{Color} -- Breakdown for player {jogadores[quem]['Nome']} --{Normal}")
        for pos, numGols in enumerate(jogadores[quem]['Gols']):  # Walk through the goals list
            print(f"    => In match {pos}, scored {numGols} goal(s).")
        print("-" * 50)
py
1a3acfa9c50106ff2a8152e540b046237db7314a
import time import logging import pytest import operator import numpy as np from copy import copy from ophyd.epics_motor import EpicsMotor from ophyd.pseudopos import PseudoPositioner, PseudoSingle from ophyd import Component as C from ophyd.utils import ExceptionBundle logger = logging.getLogger(__name__) def setUpModule(): logging.getLogger("ophyd.pseudopos").setLevel(logging.DEBUG) def tearDownModule(): logger.debug("Cleaning up") logging.getLogger("ophyd.pseudopos").setLevel(logging.INFO) motor_recs = [ "XF:31IDA-OP{Tbl-Ax:X1}Mtr", "XF:31IDA-OP{Tbl-Ax:X2}Mtr", "XF:31IDA-OP{Tbl-Ax:X3}Mtr", "XF:31IDA-OP{Tbl-Ax:X4}Mtr", "XF:31IDA-OP{Tbl-Ax:X5}Mtr", "XF:31IDA-OP{Tbl-Ax:X6}Mtr", ] class Pseudo3x3(PseudoPositioner): pseudo1 = C(PseudoSingle, "", limits=(-10, 10), egu="a") pseudo2 = C(PseudoSingle, "", limits=(-10, 10), egu="b") pseudo3 = C(PseudoSingle, "", limits=None, egu="c") real1 = C(EpicsMotor, motor_recs[0]) real2 = C(EpicsMotor, motor_recs[1]) real3 = C(EpicsMotor, motor_recs[2]) def forward(self, pseudo_pos): pseudo_pos = self.PseudoPosition(*pseudo_pos) # logger.debug('forward %s', pseudo_pos) return self.RealPosition( real1=-pseudo_pos.pseudo1, real2=-pseudo_pos.pseudo2, real3=-pseudo_pos.pseudo3, ) def inverse(self, real_pos): real_pos = self.RealPosition(*real_pos) # logger.debug('inverse %s', real_pos) return self.PseudoPosition( pseudo1=real_pos.real1, pseudo2=real_pos.real2, pseudo3=real_pos.real3 ) class Pseudo1x3(PseudoPositioner): pseudo1 = C(PseudoSingle, limits=(-10, 10)) real1 = C(EpicsMotor, motor_recs[0]) real2 = C(EpicsMotor, motor_recs[1]) real3 = C(EpicsMotor, motor_recs[2]) def forward(self, pseudo_pos): pseudo_pos = self.PseudoPosition(*pseudo_pos) # logger.debug('forward %s', pseudo_pos) return self.RealPosition( real1=-pseudo_pos.pseudo1, real2=-pseudo_pos.pseudo1, real3=-pseudo_pos.pseudo1, ) def inverse(self, real_pos): real_pos = self.RealPosition(*real_pos) # logger.debug('inverse %s', real_pos) return self.PseudoPosition(pseudo1=-real_pos.real1) class FaultyStopperEpicsMotor(EpicsMotor): def stop(self, *, success=False): raise RuntimeError("Expected exception") class FaultyPseudo1x3(Pseudo1x3): real1 = C(FaultyStopperEpicsMotor, motor_recs[0]) def test_onlypseudo(): # can't instantiate it on its own with pytest.raises(TypeError): PseudoPositioner("prefix") def test_position_wrapper(): pseudo = Pseudo3x3("", name="mypseudo", concurrent=False) test_pos = pseudo.PseudoPosition(pseudo1=1, pseudo2=2, pseudo3=3) extra_kw = dict(a=3, b=4, c=6) # positional arguments assert pseudo.to_pseudo_tuple(1, 2, 3, **extra_kw) == (test_pos, extra_kw) # sequence assert pseudo.to_pseudo_tuple((1, 2, 3), **extra_kw) == (test_pos, extra_kw) # correct type assert pseudo.to_pseudo_tuple(test_pos, **extra_kw) == (test_pos, extra_kw) # kwargs assert pseudo.to_pseudo_tuple(pseudo1=1, pseudo2=2, pseudo3=3, **extra_kw) == ( test_pos, extra_kw, ) # too many positional arguments with pytest.raises(ValueError): pseudo.to_pseudo_tuple(1, 2, 3, 4) # valid kwargs, but passing in args too with pytest.raises(ValueError): pseudo.to_pseudo_tuple(1, pseudo1=1, pseudo2=2, pseudo3=3) @pytest.mark.motorsim def test_multi_sequential(): pseudo = Pseudo3x3("", name="mypseudo", concurrent=False) pseudo.wait_for_connection() assert pseudo.egu == "a, b, c" pos2 = pseudo.PseudoPosition(pseudo1=0, pseudo2=0, pseudo3=0) pseudo.set(pos2, wait=True) time.sleep(1.0) pos1 = pseudo.PseudoPosition(pseudo1=0.1, pseudo2=0.2, pseudo3=0.3) pseudo.set(pos1, wait=True) pseudo.real1.set(0, wait=True) pseudo.real2.set(0, 
wait=True) pseudo.real3.set(0, wait=True) pseudo.pseudo1.stop() pseudo.real3.set(0, wait=True) @pytest.mark.motorsim def test_faulty_stopper(): pseudo = FaultyPseudo1x3("", name="mypseudo", concurrent=False) pseudo.wait_for_connection() with pytest.raises(ExceptionBundle): # smoke-testing for coverage pseudo.pseudo1.stop() def test_limits(): pseudo = Pseudo3x3("", name="mypseudo", concurrent=True) assert pseudo.limits == ((-10, 10), (-10, 10), (0, 0)) assert pseudo.low_limit == (-10, -10, 0) assert pseudo.high_limit == (10, 10, 0) @pytest.mark.motorsim def test_read_describe(): pseudo = Pseudo3x3("", name="mypseudo", concurrent=True) pseudo.wait_for_connection() desc_dict = pseudo.describe() desc_keys = [ "source", "upper_ctrl_limit", "lower_ctrl_limit", "shape", "dtype", "units", ] for key in desc_keys: assert key in desc_dict["mypseudo_pseudo3"] read_dict = pseudo.read() read_keys = ["value", "timestamp"] for key in read_keys: assert key in read_dict["mypseudo_pseudo3"] assert pseudo.read().keys() == pseudo.describe().keys() @pytest.mark.motorsim def test_multi_concurrent(): def done(**kwargs): logger.debug("** Finished moving (%s)", kwargs) pseudo = Pseudo3x3( "", name="mypseudo", concurrent=True, settle_time=0.1, timeout=25.0 ) assert pseudo.sequential is False assert pseudo.concurrent is True assert pseudo.settle_time == 0.1 assert pseudo.timeout == 25.0 pseudo.wait_for_connection() assert pseudo.connected assert tuple(pseudo.pseudo_positioners) == ( pseudo.pseudo1, pseudo.pseudo2, pseudo.pseudo3, ) assert tuple(pseudo.real_positioners) == (pseudo.real1, pseudo.real2, pseudo.real3) logger.info("Move to (.2, .2, .2), which is (-.2, -.2, -.2) for real " "motors") pseudo.set(pseudo.PseudoPosition(0.2, 0.2, 0.2), wait=True) logger.info("Position is: %s (moving=%s)", pseudo.position, pseudo.moving) pseudo.check_value((2, 2, 2)) pseudo.check_value(pseudo.PseudoPosition(2, 2, 2)) try: pseudo.check_value((2, 2, 2, 3)) except ValueError as ex: logger.info("Check value failed, as expected (%s)", ex) real1 = pseudo.real1 pseudo1 = pseudo.pseudo1 try: pseudo.check_value((real1.high_limit + 1, 2, 2)) except ValueError as ex: logger.info("Check value failed, as expected (%s)", ex) ret = pseudo.set((2, 2, 2), wait=False, moved_cb=done) assert ret.settle_time == 0.1 count = 0 while not ret.done: logger.info("Pos=%s %s (err=%s)", pseudo.position, ret, ret.error) count += 1 if count > 1000: raise Exception time.sleep(0.1) logger.info("Single pseudo axis: %s", pseudo1) pseudo1.set(0, wait=True, timeout=5) assert pseudo1.target == 0 pseudo1.sync() assert pseudo1.target == pseudo1.position # coverage pseudo1._started_moving try: pseudo1.check_value(real1.high_limit + 1) except ValueError as ex: logger.info("Check value for single failed, as expected (%s)", ex) logger.info("Move pseudo1 to 0, position=%s", pseudo.position) logger.info("pseudo1 = %s", pseudo1.position) def single_sub(**kwargs): # logger.info('Single sub: %s', kwargs) pass pseudo1.subscribe(single_sub, pseudo1.SUB_READBACK) ret = pseudo1.set(1, wait=False) assert pseudo.timeout == ret.timeout count = 0 while not ret.done: logger.info( "pseudo1.pos=%s Pos=%s %s (err=%s)", pseudo1.position, pseudo.position, ret, ret.error, ) count += 1 if count > 20: raise Exception time.sleep(0.1) logger.info( "pseudo1.pos=%s Pos=%s %s (err=%s)", pseudo1.position, pseudo.position, ret, ret.error, ) copy(pseudo) pseudo.read() pseudo.describe() pseudo.read_configuration() pseudo.describe_configuration() repr(pseudo) str(pseudo) pseudo.pseudo1.read() 
pseudo.pseudo1.describe() pseudo.pseudo1.read_configuration() pseudo.pseudo1.describe_configuration() @pytest.mark.motorsim def test_single_pseudo(): logger.info("------- Sequential, single pseudo positioner") pos = Pseudo1x3("", name="mypseudo", concurrent=False) pos.wait_for_connection() reals = pos._real logger.info("Move to .2, which is (-.2, -.2, -.2) for real motors") pos.set((0.2,), wait=True) logger.info("Position is: %s (moving=%s)", pos.position, pos.moving) logger.info("Real positions: %s", [real.position for real in reals]) logger.info("Move to -.2, which is (.2, .2, .2) for real motors") pos.set((-0.2,), wait=True) logger.info("Position is: %s (moving=%s)", pos.position, pos.moving) logger.info("Real positions: %s", [real.position for real in reals]) copy(pos) pos.read() pos.describe() repr(pos) str(pos) @pytest.mark.parametrize( "inpargs,inpkwargs,expected_position,expected_kwargs", [ ((1, 2, 3), {}, (1, 2, 3), {}), ((1, 2), {}, (1, 2, -3), {}), ((1,), {}, (1, -2, -3), {}), (((1, 2, 3),), {}, (1, 2, 3), {}), (([1, 2],), {}, (1, 2, -3), {}), (((1,),), {}, (1, -2, -3), {}), ((), {"pseudo1": 1, "pseudo2": 2, "pseudo3": 3}, (1, 2, 3), {}), ((), {"pseudo1": 1, "pseudo2": 2}, (1, 2, -3), {}), ((), {"pseudo1": 1}, (1, -2, -3), {}), ((), {"pseudo1": 1, "wait": True}, (1, -2, -3), {"wait": True}), (({"pseudo1": 1, "pseudo2": 2, "pseudo3": 3},), {}, (1, 2, 3), {}), (({"pseudo1": 1, "pseudo2": 2},), {}, (1, 2, -3), {}), (({"pseudo1": 1},), {}, (1, -2, -3), {}), ( ({"pseudo1": 1, "wait": True},), {"timeout": None}, (1, -2, -3), {"wait": True, "timeout": None}, ), ((1, 2, 3), {"timeout": 1}, (1, 2, 3), {"timeout": 1}), (((1, 2, 3),), {"timeout": 1}, (1, 2, 3), {"timeout": 1}), ], ) def test_pseudo_position_input_3x3( hw, inpargs, inpkwargs, expected_position, expected_kwargs ): pseudo3x3 = hw.pseudo3x3 pseudo3x3.real1.set(1) pseudo3x3.real2.set(2) pseudo3x3.real3.set(3) out, extra_kwargs = pseudo3x3.to_pseudo_tuple(*inpargs, **inpkwargs) assert out == pseudo3x3.PseudoPosition(*expected_position) assert extra_kwargs == expected_kwargs pseudo3x3.set(*inpargs, **inpkwargs) assert pseudo3x3.position == pseudo3x3.PseudoPosition(*expected_position) @pytest.mark.parametrize( "inpargs,inpkwargs", [ ((1, 2, 3, 5), {}), ((1, 2, 3), {"pseudo1": 1}), ((1, 2, 3), {"pseudo2": 1}), ((1,), {"pseudo2": 1, "pseudo3": 1}), ((1, 2), {"pseudo3": 1}), ], ) def test_pseudo_position_fail_3x3(hw, inpargs, inpkwargs): pseudo3x3 = hw.pseudo3x3 with pytest.raises(ValueError): pseudo3x3.to_pseudo_tuple(*inpargs, **inpkwargs) @pytest.mark.parametrize( "inpargs,inpkwargs,expected_position,expected_kwargs", [ ((1, 2, 3), {}, (1, 2, 3), {}), ((1, 2), {}, (1, 2, 3), {}), ((1,), {}, (1, 2, 3), {}), (((1, 2, 3),), {}, (1, 2, 3), {}), (([1, 2],), {}, (1, 2, 3), {}), (((1,),), {}, (1, 2, 3), {}), ((), {"real1": 1, "real2": 2, "real3": 3}, (1, 2, 3), {}), ((), {"real1": 1, "real2": 2}, (1, 2, 3), {}), ((), {"real1": 1}, (1, 2, 3), {}), ((), {"real1": 1, "foo": "bar"}, (1, 2, 3), {"foo": "bar"}), (({"real1": 1, "real2": 2, "real3": 3},), {}, (1, 2, 3), {}), (({"real1": 1, "real2": 2},), {}, (1, 2, 3), {}), (({"real1": 1},), {}, (1, 2, 3), {}), ( ({"real1": 1, "foo": "bar"},), {"baz": "buz"}, (1, 2, 3), {"foo": "bar", "baz": "buz"}, ), ((1, 2, 3), {"foo": "bar"}, (1, 2, 3), {"foo": "bar"}), ], ) def test_real_position_input_3x3( hw, inpargs, inpkwargs, expected_position, expected_kwargs ): pseudo3x3 = hw.pseudo3x3 pseudo3x3.real1.set(1) pseudo3x3.real2.set(2) pseudo3x3.real3.set(3) out, extra_kwargs = 
pseudo3x3.to_real_tuple(*inpargs, **inpkwargs) assert out == pseudo3x3.RealPosition(*expected_position) assert extra_kwargs == expected_kwargs @pytest.mark.parametrize( "inpargs,inpkwargs", [ ((1, 2, 3, 5), {}), ((1, 2, 3), {"real1": 1}), ((1, 2, 3), {"real2": 1}), ((1,), {"real2": 1, "real3": 1}), ((1, 2), {"real3": 1}), (({"real3": 1, "foo": "bar"},), {"foo": "bizz"}), ((), {}), ], ) def test_real_position_fail_3x3(hw, inpargs, inpkwargs): pseudo3x3 = hw.pseudo3x3 with pytest.raises(ValueError): pseudo3x3.to_real_tuple(*inpargs, **inpkwargs) def test_single_pseudo_with_sim(hw): logger.info("------- Sequential, single pseudo positioner") pos = hw.pseudo1x3 reals = pos._real logger.info("Move to .2, which is (-.2, -.2, -.2) for real motors") pos.set((0.2,), wait=True) logger.info("Position is: %s (moving=%s)", pos.position, pos.moving) logger.info("Real positions: %s", [real.position for real in reals]) logger.info("Move to -.2, which is (.2, .2, .2) for real motors") pos.set((-0.2,), wait=True) logger.info("Position is: %s (moving=%s)", pos.position, pos.moving) logger.info("Real positions: %s", [real.position for real in reals]) copy(pos) pos.read() pos.describe() repr(pos) str(pos) @pytest.mark.parametrize("typ", ("to_real_tuple", "to_pseudo_tuple")) @pytest.mark.parametrize("op", (operator.sub, operator.add)) @pytest.mark.parametrize( "a,b", [((0, 0, 0), (1, 1, 1)), ((1, 0, 1), (1, 1, 1)), ((9, 0, 0.3), (0.3, 0.1, 0.5))], ) def test_pseudo_math(hw, a, b, op, typ): pos = hw.pseudo3x3 a, _ = getattr(pos, typ)(a) b, _ = getattr(pos, typ)(b) # TODO switch to np asserts expected = op(np.asarray(a), np.asarray(b)) assert (np.asarray(op(a, b)) == expected).all() assert (np.asarray(op(a, tuple(b))) == expected).all() assert (np.asarray(op(a, list(b))) == expected).all() assert (np.asarray(op(a, b._asdict())) == expected).all() assert (np.asarray(op(a, {})) == a).all() assert abs(op(a, b)) == np.sqrt(np.sum(expected ** 2)) def test_pseudo_hints(hw): pos = hw.pseudo3x3 for j in (1, 2, 3): p = getattr(pos, "pseudo{}".format(j)) assert p.hints["fields"] == [p.readback.name] p.readback.name = "aardvark{}".format(j) assert p.hints["fields"] == [p.readback.name] expected_fields = [ getattr(pos, "pseudo{}".format(j)).readback.name for j in (1, 2, 3) ] assert pos.hints["fields"] == expected_fields
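# Illustrative sketch (not part of the test module above): the EPICS-backed
# positioners here need live motor records, so a self-contained variant in the
# style of the ophyd docs can use SoftPositioner instead of EpicsMotor. Class
# name and values below are assumptions for illustration only.
from ophyd import Component as Cpt
from ophyd import PseudoPositioner, PseudoSingle, SoftPositioner


class SoftPseudo2x2(PseudoPositioner):
    px = Cpt(PseudoSingle, limits=(-10, 10))
    py = Cpt(PseudoSingle, limits=(-10, 10))
    rx = Cpt(SoftPositioner, init_pos=0.0)
    ry = Cpt(SoftPositioner, init_pos=0.0)

    def forward(self, pseudo_pos):
        # pseudo -> real: simple sign flip, mirroring the examples above
        pseudo_pos = self.PseudoPosition(*pseudo_pos)
        return self.RealPosition(rx=-pseudo_pos.px, ry=-pseudo_pos.py)

    def inverse(self, real_pos):
        # real -> pseudo: the exact inverse of forward()
        real_pos = self.RealPosition(*real_pos)
        return self.PseudoPosition(px=-real_pos.rx, py=-real_pos.ry)


if __name__ == "__main__":
    p = SoftPseudo2x2("", name="softpseudo")
    p.set(p.PseudoPosition(px=1.0, py=2.0), wait=True)
    print(p.position)       # PseudoPosition(px=1.0, py=2.0)
    print(p.real_position)  # RealPosition(rx=-1.0, ry=-2.0)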
py
1a3ad085b6895ce9038a6facceff0c63bdeb0c6c
from .authdb import LowballArangoDBAuthDB
py
1a3ad19e9c14038b060eb21a6868f9af0c5e2507
# Imports from 3rd party libraries import dash import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.express as px # Imports from this application from app import app # 2 column layout. 1st column width = 4/12 # https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout column1 = dbc.Col( [ dcc.Markdown( """ ## Shoe Price Prediction 🦶My passion for shoes is lackluster at best. I couldn't tell you shoe prices if my life depended on it. 👢This has really gone and bit me in the butt a few times. My wife has a healthy appetite for shoes and is quick to destroy any evidence linking cost to the shoes. I have to take her word that it cost a certain amount because it was on sale and clearance. 👠This app is built specifically for individuals who may lack shoe knowledge, and may be curious to roughly how expensive, specifically women shoes are, given certain parameters. """ ), dcc.Link(dbc.Button('Price out shoes', color='primary'), href='/predictions') ], md=4, ) column2 = dbc.Col( dcc.Markdown( """ [![](https://media.giphy.com/media/26tPpjJ7T6xCvWG76/giphy.gif)](https://www.youtube.com/watch?v=3HjIljJd-o0) *(Link NSFW for Language) """ ) ) layout = dbc.Row([column1, column2])
py
1a3ad20b266bb4d279b77c5ba53aaadc4ee38d3b
#!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2019-2021 Xenios SEZC # https://www.veriblock.org # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" import io from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, disconnect_nodes, ) from test_framework.messages import CTransaction, COIN from test_framework.pop_const import POW_PAYOUT class TxnMallTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def add_options(self, parser): parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") parser.add_argument("--segwit", dest="segwit", default=False, action="store_true", help="Test behaviour with SegWit txn (which should fail") def setup_network(self): # Start with split network: super(TxnMallTest, self).setup_network() disconnect_nodes(self.nodes[1], 2) disconnect_nodes(self.nodes[2], 1) def run_test(self): if self.options.segwit: output_type = "p2sh-segwit" else: output_type = "legacy" # All nodes should start with 1250 vBTC: starting_balance = (POW_PAYOUT*25) for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) self.nodes[i].getnewaddress() # bug workaround, coins generated assigned to first getnewaddress! self.nodes[0].settxfee(.001) node0_address1 = self.nodes[0].getnewaddress(address_type=output_type) node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, ((POW_PAYOUT*25)-31)) node0_tx1 = self.nodes[0].gettransaction(node0_txid1) node0_address2 = self.nodes[0].getnewaddress(address_type=output_type) node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, (POW_PAYOUT-29)) node0_tx2 = self.nodes[0].gettransaction(node0_txid2) assert_equal(self.nodes[0].getbalance(), starting_balance + node0_tx1["fee"] + node0_tx2["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress() # Send tx1, and another transaction tx2 that won't be cloned txid1 = self.nodes[0].sendtoaddress(node1_address, 40) txid2 = self.nodes[0].sendtoaddress(node1_address, 20) # Construct a clone of tx1, to be malleated rawtx1 = self.nodes[0].getrawtransaction(txid1, 1) clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"], "sequence": rawtx1["vin"][0]["sequence"]}] clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"], rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]} clone_locktime = rawtx1["locktime"] clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. clone_tx = CTransaction() clone_tx.deserialize(io.BytesIO(bytes.fromhex(clone_raw))) if (rawtx1["vout"][0]["value"] == 40 and clone_tx.vout[0].nValue != 40*COIN or rawtx1["vout"][0]["value"] != 40 and clone_tx.vout[0].nValue == 40*COIN): (clone_tx.vout[0], clone_tx.vout[1]) = (clone_tx.vout[1], clone_tx.vout[0]) # Use a different signature hash type to sign. This creates an equivalent but malleated clone. 
# Don't send the clone anywhere yet tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_tx.serialize().hex(), None, "ALL|ANYONECANPAY") assert_equal(tx1_clone["complete"], True) # Have node0 mine a block, if requested: if (self.options.mine_block): self.nodes[0].generate(1) self.sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50 vBTC for another # matured block, minus tx1 and tx2 amounts, and minus transaction fees: expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"] if self.options.mine_block: expected += POW_PAYOUT expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Send clone and its parent to miner self.nodes[2].sendrawtransaction(node0_tx1["hex"]) txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) if self.options.segwit: assert_equal(txid1, txid1_clone) return # ... mine a block... self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], 2) self.nodes[2].sendrawtransaction(node0_tx2["hex"]) self.nodes[2].sendrawtransaction(tx2["hex"]) self.nodes[2].generate(1) # Mine another block to make sure we sync self.sync_blocks() # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx1_clone = self.nodes[0].gettransaction(txid1_clone) tx2 = self.nodes[0].gettransaction(txid2) # Verify expected confirmations assert_equal(tx1["confirmations"], -2) assert_equal(tx1_clone["confirmations"], 2) assert_equal(tx2["confirmations"], 1) # Check node0's total balance; should be same as before the clone, + 60 vBTC for 2 matured, # less possible orphaned matured subsidy expected += (POW_PAYOUT * 2) if (self.options.mine_block): expected -= POW_PAYOUT assert_equal(self.nodes[0].getbalance(), expected) if __name__ == '__main__': TxnMallTest().main()
py
1a3ad31a02640943bcc8fc6435ddd8af28bf14d8
# Input:
# 1
# 1
# 1
# 2

if __name__ == '__main__':
    # Take the input in the correct format
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())

    # Using a plain for loop:
    # final_list = []
    # Iterate from x to z and append the values into one list
    # for i in range(x + 1):
    #     for j in range(y + 1):
    #         for k in range(z + 1):
    #             # Only add the sublists whose sum is not equal to n
    #             if sum([i, j, k]) != n:
    #                 final_list.append([i, j, k])

    # Defining the final list
    final_list = []

    # Create the grid with the given values
    [[[final_list.append([i, j, k]) for k in range(z + 1)] for j in range(y + 1)] for i in range(x + 1)]

    # Only keep the sublists whose sum is not equal to n
    last_list = [
        final_list[i]
        for i in range(len(final_list))
        if sum(final_list[i]) != n
    ]

    # Return or print the filtered list
    print(last_list)
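# The nested side-effect comprehension above can also be written as a single
# comprehension. Shown here as an optional alternative sketch, using the same
# sample input as the comment at the top.
x, y, z, n = 1, 1, 1, 2
result = [
    [i, j, k]
    for i in range(x + 1)
    for j in range(y + 1)
    for k in range(z + 1)
    if i + j + k != n
]
print(result)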
py
1a3ad343f9a5a7f147d582faaaeaa4e2f1435ce0
# ***************************************************************** # Copyright 2013 MIT Lincoln Laboratory # Project: SPAR # Authors: ATLH # Description: Tests for equality_query_generator # # Modifications: # Date Name Modification # ---- ---- ------------ # 6 August 2012 ATLH Original version # ***************************************************************** from __future__ import division import os import sys this_dir = os.path.dirname(os.path.abspath(__file__)) base_dir = os.path.join(this_dir, '..', '..', '..') sys.path.append(base_dir) import unittest import time import keyword_query_generator as kqg import spar_python.common.spar_random as spar_random import spar_python.common.distributions.text_generator as text_generator import StringIO as s import spar_python.query_generation.query_schema as qs import spar_python.data_generation.spar_variables as sv class KeywordQueryGeneratorTest(unittest.TestCase): def setUp(self): self.seed = int(time.time()) self.seed_msg = "Random seed used for this test: %s" % self.seed self.longMessage = True spar_random.seed(self.seed) #set up intitialization values sub_cat = 'word' f = s.StringIO('''Buck had accepted the rope with quiet dignity. To be sure, it unwonted performance: but he had learned to trust in men he knew, and to give them credit for a wisdom that outreached his own. But when the ends of the ropes were placed in the strangers hands, he growled menacingly. He had merely intimated his displeasure, in his pride believing that to intimate was to command. But to his surprise the rope tightened around his neck, shutting off his breath. In quick rage he sprang at the man, who met him halfway, grappled him close by the throat, and with a deft twist threw him over on his back. Then the rope tightened mercilessly, while Buck struggled in a fury, his tongue lolling out of his mouth and his great chest. Never in all his life had he been so vilely treated, and never in all his life had he been so angry. 
But his strength ebbed, his eyes glazed, and he knew nothing when the train was flagged and the two men threw him into the baggage car.''') self._kw_dist = text_generator.TextGenerator((f,)) fields = [sv.VARS.NOTES3] dists = [self._kw_dist] other_fields = ['no_queries', 'rss','keyword_len','type'] other_cols = [[3, 60, 4, 'word'], [3, 60, 5, 'word'], [3, 75, 4, 'stem'], [3, 60, 5, 'stem']] self.generator = kqg.KeywordQueryGenerator('P3',sub_cat, ["LL"],dists, fields, 1000, 100, other_fields, other_cols) @unittest.skip("Sporadically fails, not sure why") def testGenerateQuery(self): """ Tests equality query generator against a 'db' to make sure it is generating the right queries """ #generate a 'db' to test against notes = [self._kw_dist.generate(125) for _ in xrange(1000)] #generate queries query_batches = self.generator.produce_query_batches() queries = [] for query_batch in query_batches: queries += query_batch.produce_queries() #check to see right number of queries generated self.assertGreaterEqual(len(queries), 6, self.seed_msg) #check queries against 'db' to make sure they match within a factor #of two word = 0 stem = 0 working_queries = 0 non_working_queries = [] for q in queries: if q[qs.QRY_TYPE] == 'word': x = lambda generated_text: \ generated_text.contains_upper(q[qs.QRY_SEARCHFOR]) word +=1 elif q[qs.QRY_TYPE] == 'stem': x = lambda generated_text: \ generated_text.contains_stem(q[qs.QRY_SEARCHFOR]) stem +=1 count_match = len([note for note in notes if x(note)]) msg = 'Query %d was: \n' \ 'sub_cat: %s\n'\ 'field: %s\n'\ 'type: %s\n'\ 'rss: %d\n'\ 'value: %s\n' % (q[qs.QRY_QID], q[qs.QRY_SUBCAT], q[qs.QRY_FIELD], q[qs.QRY_TYPE], q[qs.QRY_RSS], q[qs.QRY_SEARCHFOR]) if count_match <= q[qs.QRY_URSS]*4 and count_match >= q[qs.QRY_LRSS]/4: working_queries+=1 else: non_working_queries.append(msg) fail_msg = '' for msg in non_working_queries[:3]: fail_msg += msg self.assertGreaterEqual(working_queries, 6, fail_msg) #check to see each field had the correct number of queries #ideally this number would be greater than 6 (the requested amount) #but because the distribution used for unit testing is so small #there is a greater margin of error at this scale self.assertGreaterEqual(word, 3, self.seed_msg) self.assertGreaterEqual(stem, 3, self.seed_msg)
py
1a3ad3a0e865be8582efcc97b87ec12b951d000d
"""Provides data related to paths.""" import sys from pathlib import PurePosixPath, PureWindowsPath from typing import Any, Final from mimesis.data import ( FOLDERS, PLATFORMS, PROGRAMMING_LANGS, PROJECT_NAMES, USERNAMES, ) from mimesis.providers.base import BaseProvider __all__ = ["Path"] class Path(BaseProvider): """Class that provides methods and property for generate paths.""" def __init__( self, platform: str = sys.platform, *args: Any, **kwargs: Any, ) -> None: """Initialize attributes. Supported platforms: 'linux', 'darwin', 'win32', 'win64'. :param platform: Required platform type. """ super().__init__(*args, **kwargs) self.platform = platform self._pathlib_home = PureWindowsPath() if "win" in platform else PurePosixPath() self._pathlib_home /= PLATFORMS[platform]["home"] class Meta: """Class for metadata.""" name: Final[str] = "path" def root(self) -> str: """Generate a root dir path. :return: Root dir. :Example: / """ return str(self._pathlib_home.parent) def home(self) -> str: """Generate a home path. :return: Home path. :Example: /home """ return str(self._pathlib_home) def user(self) -> str: """Generate a random user. :return: Path to user. :Example: /home/oretha """ user = self.random.choice(USERNAMES) user = user.capitalize() if "win" in self.platform else user.lower() return str(self._pathlib_home / user) def users_folder(self) -> str: """Generate a random path to user's folders. :return: Path. :Example: /home/taneka/Pictures """ user = self.user() folder = self.random.choice(FOLDERS) return str(self._pathlib_home / user / folder) def dev_dir(self) -> str: """Generate a random path to development directory. :return: Path. :Example: /home/sherrell/Development/Python """ user = self.user() folder = self.random.choice(["Development", "Dev"]) stack = self.random.choice(PROGRAMMING_LANGS) return str(self._pathlib_home / user / folder / stack) def project_dir(self) -> str: """Generate a random path to project directory. :return: Path to project. :Example: /home/sherika/Development/Falcon/mercenary """ dev_dir = self.dev_dir() project = self.random.choice(PROJECT_NAMES) return str(self._pathlib_home / dev_dir / project)
py
1a3ad49fd547dbe676274c5229f948dfa68e0b86
# coding: utf-8 """ ORR API Documentation The main ORR documentation is located at: https://mmisw.org/orrdoc/ __Please note__: - The ORR API is approaching a stable version but is still work in progress. Please [let us know](https://github.com/mmisw/mmiorr-docs/issues) if you have any questions or suggestions. - Besides the documentation itself, this page lets you directly exercise and test the API. Click on any operation header below to learn more details about it, and see a \"Try it out\" button. - You can click on the \"Authorize\" button at the top right of this page (or the `!` icon under the particular operation) to retrieve an authentication token corresponding to your ORR instance credentials (username and password). Once authorized, the authentication token will be automatically included in the corresponding request. You will be able to not only perform the basic `GET` operations, but also see expanded responses according to your access privileges as well as perform other operations. - The \"Try it out\" button will also show the corresponding API call that you can submit from the command line using [`curl`](https://curl.haxx.se/). - This API includes administrative operations related with the triple store. The SPARQL endpoint itself (located at `http://cor.esipfed.org/sparql` for the MMI ORR instance) is not described here. (General SPARQL information can be found [here](https://en.wikipedia.org/wiki/SPARQL), and regarding the current service used by the ORR to support the SPARQL interface [here](http://franz.com/agraph/support/documentation/current/http-protocol.html).) - Actual requests from this page are against the specific endpoint at `http://cor.esipfed.org/ont`. # noqa: E501 OpenAPI spec version: v0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import swagger_client from swagger_client.api.organization_api import OrganizationApi # noqa: E501 from swagger_client.rest import ApiException class TestOrganizationApi(unittest.TestCase): """OrganizationApi unit test stubs""" def setUp(self): self.api = swagger_client.api.organization_api.OrganizationApi() # noqa: E501 def tearDown(self): pass def test_add_org(self): """Test case for add_org Registers an organization # noqa: E501 """ pass def test_delete_org(self): """Test case for delete_org Unregisters an organization # noqa: E501 """ pass def test_org_get(self): """Test case for org_get Gets information about registered organizations # noqa: E501 """ pass def test_org_org_name_get(self): """Test case for org_org_name_get Gets basic information of a particular organization # noqa: E501 """ pass def test_update_org(self): """Test case for update_org Updates information about a registered organization # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
py
1a3ad4ea74879b32637a95401cc78ccfe43f9969
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from fuzzywuzzy import process
from fuzzywuzzy import fuzz
import streamlit as st

movies = pd.read_csv('movies.csv')
ratings = pd.read_csv('ratings.csv')

movies.shape
movies.head()
ratings.shape
ratings.head()

# Mean rating per movie
ratings.groupby(by='movieId')['rating'].mean()

ratings.drop('timestamp', axis='columns', inplace=True)
ratings.drop_duplicates(subset='movieId', inplace=True)
ratings.shape
ratings['rating'].value_counts()

# '|' is a regex metacharacter, so replace it literally (regex=False),
# then keep only the first listed genre per movie.
movies['genres'] = movies['genres'].str.replace('|', ' ', regex=False)
movies['genres'] = movies['genres'].apply(lambda x: x.lstrip().split(' ')[0])
movies['genres'].value_counts()

sns.countplot(x=ratings['rating'])
sns.boxplot(x=ratings['rating'], y=ratings['userId'])
movies['genres'].value_counts()[:10].plot.pie(cmap='Set3')

movies.isnull().sum()
ratings.isnull().sum()
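# The script above imports csr_matrix and NearestNeighbors without using them
# yet. A hypothetical continuation for item-based k-NN is sketched below on a
# tiny synthetic frame, so it runs without movies.csv/ratings.csv.
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors

toy = pd.DataFrame({
    "userId":  [1, 1, 2, 2, 3, 3],
    "movieId": [10, 20, 10, 30, 20, 30],
    "rating":  [4.0, 5.0, 4.5, 3.0, 2.0, 4.0],
})
# Rows are movies, columns are users; missing ratings become 0.
matrix = toy.pivot_table(index="movieId", columns="userId", values="rating", fill_value=0)

knn = NearestNeighbors(metric="cosine", algorithm="brute")
knn.fit(csr_matrix(matrix.values))

# Movies most similar (by cosine distance over user ratings) to the first movie.
distances, indices = knn.kneighbors(matrix.iloc[[0]].values, n_neighbors=2)
print(matrix.index[indices[0]])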
py
1a3ad72bb249baa5bf40407c4867d93fa20931ac
from wurst.geo import geomatcher from rmnd_lca import DATA_DIR REGION_MAPPING_FILEPATH = (DATA_DIR / "regionmappingH12.csv") class Geomap(): """ Map ecoinvent locations to REMIND regions and vice-versa. """ def __init__(self): self.geo = self.get_REMIND_geomatcher() def get_REMIND_geomatcher(self): """ Load a geomatcher object from the `constructive_geometries`library and add definitions. It is used to find correspondences between REMIND and ecoinvent region names. :return: geomatcher object :rtype: wurst.geo.geomatcher """ with open(REGION_MAPPING_FILEPATH) as f: f.readline() csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()] l = [(x[1], x[2]) for x in csv_list] # List of countries not found countries_not_found = ["CC", "CX", "GG", "JE", "BL"] rmnd_to_iso = {} iso_to_rmnd = {} # Build a dictionary that maps region names (used by REMIND) to ISO country codes # And a reverse dictionary that maps ISO country codes to region names for ISO, region in l: if ISO not in countries_not_found: try: rmnd_to_iso[region].append(ISO) except KeyError: rmnd_to_iso[region] = [ISO] iso_to_rmnd[region] = ISO geo = geomatcher geo.add_definitions(rmnd_to_iso, "REMIND") return geo def remind_to_ecoinvent_location(self, location): """ Find the corresponding ecoinvent region given a REMIND region. :param location: name of a REMIND region :type location: str :return: name of an ecoinvent region :rtype: str """ if location != "World": location = ("REMIND", location) ecoinvent_locations = [] try: for r in self.geo.intersects(location): if not isinstance(r, tuple): ecoinvent_locations.append(r) return ecoinvent_locations except KeyError as e: print("Can't find location {} using the geomatcher.".format(location)) else: return ["GLO"] def ecoinvent_to_remind_location(self, location): """ Return a REMIND region name for a 2-digit ISO country code given. Set rules in case two REMIND regions are within the ecoinvent region. :param location: 2-digit ISO country code :type location: str :return: REMIND region name :rtype: str """ mapping = {"GLO": "World", "RoW": "CAZ", "IAI Area, Russia & RER w/o EU27 & EFTA": "REF"} if location in mapping: return mapping[location] remind_location = [ r[1] for r in self.geo.within(location) if r[0] == "REMIND" and r[1] != "World" ] mapping = { ("AFR", "MEA"): "AFR", ("AFR", "SSA"): "AFR", ("EUR", "NEU"): "EUR", ("EUR", "REF"): "EUR", ("OAS", "CHA"): "OAS", ("OAS", "EUR"): "OAS", ("OAS", "IND"): "OAS", ("OAS", "JPN"): "OAS", ("OAS", "MEA"): "OAS", ("OAS", "REF"): "OAS", ("USA", "CAZ"): "USA", } # If we have more than one REMIND region if len(remind_location) > 1: # TODO: find a more elegant way to do that for key, value in mapping.items(): # We need to find the most specific REMIND region if len(set(remind_location).intersection(set(key))) == 2: remind_location.remove(value) return remind_location[0] elif len(remind_location) == 0: print("no location for {}".format(location)) else: return remind_location[0]
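# Usage sketch for the Geomap class above. It assumes the wurst geomatcher data
# and the bundled regionmappingH12.csv are available; outputs are indicative only.
if __name__ == "__main__":
    geo = Geomap()
    print(geo.ecoinvent_to_remind_location("DE"))   # e.g. "EUR"
    print(geo.remind_to_ecoinvent_location("EUR"))  # list of ecoinvent locations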
py
1a3ad7aa9386365263fd55cd11a20135612b1f7d
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import kubernetes.client from kubernetes.client.rest import ApiException from kubernetes.client.models.v1_volume_attachment_source import V1VolumeAttachmentSource class TestV1VolumeAttachmentSource(unittest.TestCase): """ V1VolumeAttachmentSource unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1VolumeAttachmentSource(self): """ Test V1VolumeAttachmentSource """ # FIXME: construct object with mandatory attributes with example values #model = kubernetes.client.models.v1_volume_attachment_source.V1VolumeAttachmentSource() pass if __name__ == '__main__': unittest.main()
py
1a3ad7f76f72043f8d334b72de5692fe4a75b44d
from django.apps import AppConfig class ElectionsConfig(AppConfig): name = "apollo.elections"
py
1a3ad984643c6a5a2bb6f87a3f921d55a7bd69c5
""" Image classes """ import base64 import logging import zipfile from urllib.parse import quote_plus from galaxy.datatypes.text import Html as HtmlFromText from galaxy.util import nice_size from galaxy.util.image_util import check_image_type from . import data log = logging.getLogger(__name__) # TODO: Uploading image files of various types is supported in Galaxy, but on # the main public instance, the display_in_upload is not set for these data # types in datatypes_conf.xml because we do not allow image files to be uploaded # there. There is currently no API feature that allows uploading files outside # of a data library ( where it requires either the upload_paths or upload_directory # option to be enabled, which is not the case on the main public instance ). Because # of this, we're currently safe, but when the api is enhanced to allow other uploads, # we need to ensure that the implementation is such that image files cannot be uploaded # to our main public instance. class Image(data.Data): """Class describing an image""" edam_data = 'data_2968' edam_format = "format_3547" file_ext = '' def __init__(self, **kwd): super().__init__(**kwd) self.image_formats = [self.file_ext.upper()] def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: dataset.peek = 'Image in %s format' % dataset.extension dataset.blurb = nice_size(dataset.get_size()) else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' def sniff(self, filename): """Determine if the file is in this format""" return check_image_type(filename, self.image_formats) def handle_dataset_as_image(self, hda): dataset = hda.dataset name = hda.name or '' with open(dataset.file_name, "rb") as f: base64_image_data = base64.b64encode(f.read()).decode("utf-8") return f"![{name}](data:image/{self.file_ext};base64,{base64_image_data})" class Jpg(Image): edam_format = "format_3579" file_ext = "jpg" def __init__(self, **kwd): super().__init__(**kwd) self.image_formats = ['JPEG'] class Png(Image): edam_format = "format_3603" file_ext = "png" class Tiff(Image): edam_format = "format_3591" file_ext = "tiff" class Hamamatsu(Image): file_ext = "vms" class Mirax(Image): file_ext = "mrxs" class Sakura(Image): file_ext = "svslide" class Nrrd(Image): file_ext = "nrrd" class Bmp(Image): edam_format = "format_3592" file_ext = "bmp" class Gif(Image): edam_format = "format_3467" file_ext = "gif" class Im(Image): edam_format = "format_3593" file_ext = "im" class Pcd(Image): edam_format = "format_3594" file_ext = "pcd" class Pcx(Image): edam_format = "format_3595" file_ext = "pcx" class Ppm(Image): edam_format = "format_3596" file_ext = "ppm" class Psd(Image): edam_format = "format_3597" file_ext = "psd" class Xbm(Image): edam_format = "format_3598" file_ext = "xbm" class Xpm(Image): edam_format = "format_3599" file_ext = "xpm" class Rgb(Image): edam_format = "format_3600" file_ext = "rgb" class Pbm(Image): edam_format = "format_3601" file_ext = "pbm" class Pgm(Image): edam_format = "format_3602" file_ext = "pgm" class Eps(Image): edam_format = "format_3466" file_ext = "eps" class Rast(Image): edam_format = "format_3605" file_ext = "rast" class Pdf(Image): edam_format = "format_3508" file_ext = "pdf" def sniff(self, filename): """Determine if the file is in pdf format.""" with open(filename, 'rb') as fh: return fh.read(4) == b"%PDF" def create_applet_tag_peek(class_name, archive, params): text = """ <object classid="java:{}" type="application/x-java-applet" height="30" width="200" align="center" > <param 
name="archive" value="{}"/>""".format(class_name, archive) for name, value in params.items(): text += f"""<param name="{name}" value="{value}"/>""" text += """ <object classid="clsid:8AD9C840-044E-11D1-B3E9-00805F499D93" height="30" width="200" > <param name="code" value="{}" /> <param name="archive" value="{}"/>""".format(class_name, archive) for name, value in params.items(): text += f"""<param name="{name}" value="{value}"/>""" text += """<div class="errormessage">You must install and enable Java in your browser in order to access this applet.<div></object> </object> """ return """<div><p align="center">%s</p></div>""" % text class Gmaj(data.Data): """Class describing a GMAJ Applet""" edam_format = "format_3547" file_ext = "gmaj.zip" copy_safe_peek = False def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: if hasattr(dataset, 'history_id'): params = { "bundle": "display?id=%s&tofile=yes&toext=.zip" % dataset.id, "buttonlabel": "Launch GMAJ", "nobutton": "false", "urlpause": "100", "debug": "false", "posturl": "history_add_to?%s" % "&".join("{}={}".format(x[0], quote_plus(str(x[1]))) for x in [('copy_access_from', dataset.id), ('history_id', dataset.history_id), ('ext', 'maf'), ('name', 'GMAJ Output on data %s' % dataset.hid), ('info', 'Added by GMAJ'), ('dbkey', dataset.dbkey)]) } class_name = "edu.psu.bx.gmaj.MajApplet.class" archive = "/static/gmaj/gmaj.jar" dataset.peek = create_applet_tag_peek(class_name, archive, params) dataset.blurb = 'GMAJ Multiple Alignment Viewer' else: dataset.peek = "After you add this item to your history, you will be able to launch the GMAJ applet." dataset.blurb = 'GMAJ Multiple Alignment Viewer' else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' def display_peek(self, dataset): try: return dataset.peek except Exception: return "peek unavailable" def get_mime(self): """Returns the mime type of the datatype""" return 'application/zip' def sniff(self, filename): """ NOTE: the sniff.convert_newlines() call in the upload utility will keep Gmaj data types from being correctly sniffed, but the files can be uploaded (they'll be sniffed as 'txt'). This sniff function is here to provide an example of a sniffer for a zip file. """ if not zipfile.is_zipfile(filename): return False contains_gmaj_file = False with zipfile.ZipFile(filename, "r") as zip_file: for name in zip_file.namelist(): if name.split(".")[1].strip().lower() == 'gmaj': contains_gmaj_file = True break if not contains_gmaj_file: return False return True class Html(HtmlFromText): """Deprecated class. This class should not be used anymore, but the galaxy.datatypes.text:Html one. 
This is for backwards compatibilities only.""" class Laj(data.Text): """Class describing a LAJ Applet""" file_ext = "laj" copy_safe_peek = False def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: if hasattr(dataset, 'history_id'): params = { "alignfile1": "display?id=%s" % dataset.id, "buttonlabel": "Launch LAJ", "title": "LAJ in Galaxy", "posturl": quote_plus("history_add_to?%s" % "&".join(f"{key}={value}" for key, value in {'history_id': dataset.history_id, 'ext': 'lav', 'name': 'LAJ Output', 'info': 'Added by LAJ', 'dbkey': dataset.dbkey, 'copy_access_from': dataset.id}.items())), "noseq": "true" } class_name = "edu.psu.cse.bio.laj.LajApplet.class" archive = "/static/laj/laj.jar" dataset.peek = create_applet_tag_peek(class_name, archive, params) else: dataset.peek = "After you add this item to your history, you will be able to launch the LAJ applet." dataset.blurb = 'LAJ Multiple Alignment Viewer' else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' def display_peek(self, dataset): try: return dataset.peek except Exception: return "peek unavailable"
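# The Pdf.sniff() check above is a four-byte magic-number test; the same idea as
# a standalone helper (illustrative only, not a Galaxy API):
def looks_like_pdf(filename: str) -> bool:
    with open(filename, "rb") as fh:
        return fh.read(4) == b"%PDF"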
py
1a3adb93e556638e623416385e24f05900b22483
#!/usr/bin/env python import os, sys, unittest lib_path = os.path.abspath(os.path.join('..')) sys.path.append(lib_path) from osler.assertion import Assertion from osler.criterion import Criterion class TestCriterion(unittest.TestCase): def test_init(self): assertion = Assertion("This is an assertion", 1.3) criterion = Criterion(assertion, True) self.assertEqual(criterion.assertion, assertion) self.assertEqual(criterion.truth_value, True) self.assertEqual(criterion.name, "This_is_an_assertion_is_True") def test_equivalence(self): assertion1 = Assertion("This is an assertion", 1.3) criterion1 = Criterion(assertion1, True) assertion2 = Assertion("This is an assertion", 1.3) criterion2 = Criterion(assertion2, True) self.assertTrue(criterion1==criterion2) self.assertFalse(criterion1.opposite(criterion2)) def test_opposites(self): assertion = Assertion("This is an assertion", 1.3) criterion1 = Criterion(assertion, True) criterion2 = Criterion(assertion, False) self.assertTrue(criterion1.opposite(criterion2)) self.assertFalse(criterion1==criterion2) if __name__ == '__main__': unittest.main()
py
1a3adc0fd1bd76fa9e603d8a8ba82b13e8fc5e80
import json def format_row(tab_id, row_no): return { "repeatCell": { "range": { "sheetId": tab_id, "startRowIndex": row_no, "endRowIndex": row_no + 1, "startColumnIndex": 0, "endColumnIndex": 6 }, "cell": { "userEnteredFormat": { "backgroundColor": { "red": 0.7176471, "green": 0.88235295, "blue": 0.8039216 }, "horizontalAlignment": "LEFT", "textFormat": { "fontSize": 10, "bold": True } } }, "fields": "userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)" } } def get_editors(): """ args: editors: list, list of gmail addresses of authorized editors returns: users_dict: dict, dictionary of users formatted for google sheet API """ with open('./data/editors.json', 'r') as jsonfile: editors = json.load(jsonfile) return editors def cat_headings_formating(tab_id, row_nos): return { "requests": [format_row(tab_id, n) for n in row_nos] } def branch_validation(tab_id, branch_count): values = [ {"userEnteredValue" : f"='branch codes'!$A$2:$A${branch_count + 1}"}] return { "range": { "sheetId": tab_id, "startRowIndex": 2, "startColumnIndex": 7, "endColumnIndex": 8, }, "rule": { "condition": { "type": 'ONE_OF_RANGE', "values": values }, "inputMessage": 'invalid branch code', "strict": True, "showCustomUi": True } } def shopping_cart_validation_tab_template(tab_id): """ Encodes proprerties of Google Sheet validation tab args: tab_id: string, tab id returns: body: dictionary, request body """ return { "requests": [ { 'updateSheetProperties': { 'properties': { 'sheetId': tab_id, 'hidden': True, }, 'fields': 'hidden' }, }, { "addProtectedRange": { "protectedRange": { "range": { "sheetId": tab_id, "endColumnIndex": 0, "endColumnIndex": 1 }, "description": "admin edits only", "warningOnly": False, "requestingUserCanEdit": False, "editors": get_editors() } } }, ], 'includeSpreadsheetInResponse': False, 'responseIncludeGridData': False } def shopping_cart_data_tab_template(tab_id, branch_count): """ Encodes properies of Google Sheet data tabs args: tab_id: string, tab (sheet id) returns: body: dictionary, request body """ return { "requests": [ { "repeatCell": { "range": { "sheetId": tab_id, "startRowIndex": 0, "endRowIndex": 1 }, "cell": { "userEnteredFormat": { "backgroundColor": { "red": 0.64313725, "green": 0.76078431, "blue": 0.95686275 }, "horizontalAlignment": "CENTER", "textFormat": { "fontSize": 10, "bold": True } } }, "fields": "userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)" } }, { 'updateSheetProperties': { 'properties': { 'sheetId': tab_id, 'gridProperties': { 'frozenRowCount': 1 }, }, 'fields': 'gridProperties.frozenRowCount' }, }, { "addConditionalFormatRule": { "rule": { "ranges": [ { "sheetId": tab_id, "startRowIndex": 2, "startColumnIndex": 7, "endColumnIndex": 8 } ], "booleanRule": { "condition": { "type": "NOT_BLANK", "values": [] }, "format": { 'backgroundColor': { 'red': 0.95686275, 'green': 0.78039217, 'blue': 0.7647059, } } } }, "index": 0 } }, { "setDataValidation": branch_validation(tab_id, branch_count) }, { "addProtectedRange": { "protectedRange": { "range": { "sheetId": tab_id, "endColumnIndex": 0, "endColumnIndex": 7 }, "description": "admin edits only", "warningOnly": False, "requestingUserCanEdit": False, "editors": get_editors() } } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 0, "endIndex": 1 }, "properties": { "pixelSize": 100 }, "fields": "pixelSize" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 1, "endIndex": 2 }, 
"properties": { "pixelSize": 250 }, "fields": "pixelSize" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 2, "endIndex": 3 }, "properties": { "pixelSize": 400 }, "fields": "pixelSize" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 3, "endIndex": 4 }, "properties": { "pixelSize": 250 }, "fields": "pixelSize" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 4, "endIndex": 5 }, "properties": { "pixelSize": 75 }, "fields": "pixelSize" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 5, "endIndex": 6 }, "properties": { "pixelSize": 50 }, "fields": "pixelSize" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 6, "endIndex": 7 }, "properties": { "pixelSize": 80, "hiddenByUser": True }, "fields": "pixelSize, hiddenByUser" } }, { "updateDimensionProperties": { "range": { "sheetId": tab_id, "dimension": "COLUMNS", "startIndex": 7, "endIndex": 8 }, "properties": { "pixelSize": 80 }, "fields": "pixelSize" } }, ], 'includeSpreadsheetInResponse': False, 'responseIncludeGridData': False }
py
1a3adc1984eeb3db5955052d3de44b81989df17d
import os
import re
import numpy as np
import argparse
import sys
from natsort import natsorted
from plyfile import PlyData, PlyElement
import pandas as pd

'''
script to evaluate a model

execution example:
 - python3 evaluate_instances.py --path_runs /home/uib/Desktop/test_evaluate_instances/ --path_cls /home/uib/Desktop/test_evaluate_instances/classes.txt --iou_thr 0.5 --test_name test --ref 0
'''


def get_iou(inst1, inst2):
    inst1 = inst1[:, 0:3].tolist()
    inst2 = inst2[:, 0:3].tolist()
    intersection = 0
    for i in inst1:
        if i in inst2:
            intersection += 1
    union = len(inst1) + len(inst2) - intersection
    iou = intersection / union
    return iou


def read_ply(filename):
    """ read XYZ point cloud from filename PLY file """
    plydata = PlyData.read(filename)
    pc = plydata['vertex'].data
    pc_array = np.array([[x, y, z, r, g, b, c, i] for x, y, z, r, g, b, c, i in pc])
    return pc_array


def get_info_classes(cls_path):
    classes = []
    colors = []
    for line in open(cls_path):
        data = line.split()
        classes.append(data[0])
        colors.append([int(data[1]), int(data[2]), int(data[3])])
    labels = {cls: i for i, cls in enumerate(classes)}
    label2color = {classes.index(cls): colors[classes.index(cls)] for cls in classes}
    return classes, labels, label2color


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path_runs', help='path to the runs folder.')
    parser.add_argument('--path_cls', help='path to the class file.')
    parser.add_argument('--iou_thr', default=0.5, help='min iou.')
    parser.add_argument('--test_name', help='name of the test')
    parser.add_argument('--ref', default=1, help='evaluate refined instances (1) or raw instances (0)')
    parsed_args = parser.parse_args(sys.argv[1:])

    path_runs = parsed_args.path_runs
    path_cls = parsed_args.path_cls  # get class txt path
    iou_thr = float(parsed_args.iou_thr)
    test_name = parsed_args.test_name
    ref = int(parsed_args.ref)

    for run in os.listdir(path_runs):
        print("evaluating run: " + run)
        path_run = os.path.join(path_runs, run)
        path_infer = os.path.join(path_run, 'dump_' + test_name)

        classes, labels, label2color = get_info_classes(path_cls)

        files = natsorted(os.listdir(path_infer))
        cases = [s for s in files if s.endswith(".obj")]
        names = natsorted(set([re.split(r"[.\_]+", string)[0] for string in cases]))

        tp = np.zeros((len(classes),), dtype=int)
        fp = np.zeros((len(classes),), dtype=int)
        n_gt = np.zeros((len(classes),), dtype=int)
        n_pred = np.zeros((len(classes),), dtype=int)
        iou_max_sum = np.zeros((len(classes),), dtype=float)

        for name in names:
            print("evaluating case: " + name)
            path_gt = os.path.join(path_infer, name + "_gt_inst.ply")
            path_pred = os.path.join(path_infer, name + "_pred_inst.ply")
            if ref == 1:
                path_pred = os.path.join(path_infer, name + "_pred_inst_ref.ply")

            gt = read_ply(path_gt)
            pred = read_ply(path_pred)

            # IN CASE GT OR PRED ARE "EMPTY" - LOOK AT GET_INSTANCES OUTPUT WHEN NO
            # INSTANCES ARE FOUND (THEY SAVE "NULL" AS A TWO-ROW NUMPY ARRAY)
            if (gt.shape[0] > 2) and (pred.shape[0] > 2):
                gt_list = list()
                instances_gt = set(gt[..., 7])
                instances_pred = set(pred[..., 7])

                for i in instances_gt:
                    inst = gt[np.where(gt[..., 7] == float(i))]
                    gt_list.append(inst)
                    n_gt[int(inst[0, 6])] += 1

                pred_list = list()
                for j in instances_pred:
                    inst = pred[np.where(pred[..., 7] == float(j))]
                    pred_list.append(inst)
                    n_pred[int(inst[0, 6])] += 1

                for i, pred_inst in enumerate(pred_list):
                    iou_list = list()
                    for j, gt_inst in enumerate(gt_list):
                        if pred_inst[0, 6] == gt_inst[0, 6]:
                            iou = get_iou(pred_inst, gt_inst)
                        else:
                            iou = 0
                        iou_list.append(iou)
                    iou_max = max(iou_list)
                    iou_max_sum[int(pred_inst[0, 6])] += iou_max
                    if iou_max >= iou_thr:
                        tp[int(pred_inst[0, 6])] += 1
                    else:
                        fp[int(pred_inst[0, 6])] += 1

        fn = n_gt - tp
        iou_max_mean = iou_max_sum / n_pred

        # TODO: make changes to output one spreadsheet row per class with its name
        recall = tp / (tp + fn)
        precision = tp / (tp + fp)
        f1 = (2 * recall * precision) / (recall + precision)

        filepath = os.path.join(path_run, "evaluation_instance_" + test_name + ".xlsx")
        if ref == 1:
            filepath = os.path.join(path_run, "evaluation_instance_ref_" + test_name + ".xlsx")

        header = ['Recall', 'Precision', 'F1', 'mean_IoU']
        csv = ({header[0]: recall, header[1]: precision, header[2]: f1, header[3]: iou_max_mean})
        df = pd.DataFrame.from_records(csv, index=classes)
        df.to_excel(filepath)


if __name__ == "__main__":
    main()
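# Worked example for the point-set IoU helper above, on two tiny synthetic
# instances (columns 0-2 are XYZ; get_iou ignores the remaining columns).
import numpy as np

inst_a = np.array([[0, 0, 0, 0, 0, 0, 0, 1],
                   [1, 0, 0, 0, 0, 0, 0, 1],
                   [2, 0, 0, 0, 0, 0, 0, 1]], dtype=float)
inst_b = np.array([[1, 0, 0, 0, 0, 0, 0, 2],
                   [2, 0, 0, 0, 0, 0, 0, 2]], dtype=float)

# 2 shared points, union = 3 + 2 - 2 = 3  ->  IoU = 2/3
print(get_iou(inst_a, inst_b))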
py
1a3adc3f32d1075ce6c8b97166346bfabb917174
# # Copyright 2013 eNovance <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base import wsme from ceilometer.api.controllers.v2 import base as v2_base class TestWsmeCustomType(base.BaseTestCase): def test_advenum_default(self): class dummybase(wsme.types.Base): ae = v2_base.AdvEnum("name", str, "one", "other", default="other") obj = dummybase() self.assertEqual("other", obj.ae) obj = dummybase(ae="one") self.assertEqual("one", obj.ae) self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists")
py
1a3add252d958a5c81f02581897890934f22204a
"""Main module.""" class EsbmcLauncher: """Configurations, setup and launch of ESBMC""" def __init__(self, esbmc_bin="esbmc"): self.esbmc_bin = esbmc_bin self.files = [] self.args = [] def load_json_args(self, json_file: str): """Loads a json file as esbmc options""" pass def show_loops(self): pass def show_goto_program(self): pass @staticmethod def get_esbmc() -> str: windows_link = "https://github.com/esbmc/esbmc/releases//latest/download/ESBMC-Windows.zip" macos_link = "https://github.com/esbmc/esbmc/releases/latest/download/ESBMC-Darwin.sh" linux_link = "https://github.com/esbmc/esbmc/releases/latest/download/ESBMC-Linux.sh" return linux_link def run(self): pass
py
1a3add9bfd3145a246ab0c3d6b9ef0bbb6c6416e
# AUTOGENERATED BY NBDEV! DO NOT EDIT! __all__ = ["index", "modules", "custom_doc_links", "git_url"] index = {"filter_post": "01_fb_scraper.ipynb", "extract_comments": "01_fb_scraper.ipynb", "FbScraper": "01_fb_scraper.ipynb"} modules = ["fb_scraper.py"] doc_url = "https://MJPansa.github.io/scrapinghub/" git_url = "https://github.com/MJPansa/scrapinghub/tree/master/" def custom_doc_links(name): return None
py
1a3ade117d94915d899a584254fcb61e86faa104
import asyncio from csv import DictReader from io import BytesIO import json import zipfile from gtfs_util.util import TextZipFile from gtfs_util import constants from gtfs_util.static.models import ( agency, service, service_update, route, point, stop_time, stop, transfer, trip, ) FILENAME_MODEL_MAPPING = { constants.AGENCY_FILENAME: agency.Agency, constants.SERVICE_FILENAME: service.Service, constants.SERVICE_UPDATE_FILENAME: service_update.ServiceUpdate, constants.ROUTE_FILENAME: route.Route, constants.POINT_FILENAME: point.Point, constants.STOP_TIME_FILENAME: stop_time.StopTime, constants.STOP_FILENAME: stop.Stop, constants.TRANSFER_FILENAME: transfer.Transfer, constants.TRIP_FILENAME: trip.Trip, } async def _read_async(data, file=True, mask=set()): if file: with zipfile.ZipFile(data, 'r') as f: infos = f.infolist() raw_data = { i.filename: DictReader(f.read(i.filename).decode().split('\r\n')) for i in infos if i not in mask } else: with BytesIO(data) as buffer: with zipfile.ZipFile(buffer, 'r') as f: infos = f.infolist() raw_data = { i.filename: DictReader(f.read(i.filename).decode().split('\r\n')) for i in infos if i not in mask } return raw_data async def load_async(*args, model=False, file=True, mask=set()): ops = ( _read_async(arg, file=fie, mask=mask) for arg in args ) feeds = await asyncio.gather(*ops) return _parse(feeds, model=model) async def load_aiter(*args, model=False, file=True, chunk_size=1, mask=set()): for arg in args: with TextZipFile(arg, 'r') as z: infos = z.infolist() for info in infos: name = info.filename if name in mask: continue with z.open(name, 'r') as f: reader = DictReader(f) static_model = FILENAME_MODEL_MAPPING[name] reader.fieldnames = normalize_names(static_model, reader.fieldnames) buffer = [] for line in reader: normalized_line = normalize_data(static_model, line) if model: data = static_model(**normalized_line) else: data = (normalized_line, arg, name) if chunk_size > 1: buffer.append(data) if len(buffer) == chunk_size: yield buffer buffer = [] else: yield data yield buffer def _read(data, file=True, mask=set()): if file: with zipfile.ZipFile(data, 'r') as f: infos = f.infolist() raw_data = { i.filename: DictReader(f.read(i.filename).decode().split('\r\n')) for i in infos if i not in mask } else: with BytesIO(data) as buffer: with zipfile.ZipFile(buffer, 'r') as f: infos = f.infolist() raw_data = { i.filename: DictReader(f.read(i.filename).decode().split('\r\n')) for i in infos if i not in mask } return raw_data def load(*args, model=False, file=True, mask=set()): feeds = ( _read(arg, file=file, mask=mask) for arg in args ) return _parse(feeds, model=model) def load_iter(*args, model=False, file=True, chunk_size=1, mask=set()): if not file: args = [BytesIO(arg) for arg in args] for arg in args: with TextZipFile(arg, 'r') as z: infos = z.infolist() for info in infos: name = info.filename if name in mask: continue with z.open(name, 'r') as f: reader = DictReader(f) static_model = FILENAME_MODEL_MAPPING[name] reader.fieldnames = normalize_names(static_model, reader.fieldnames) buffer = [] for line in reader: normalized_line = normalize_data(static_model, line) if model: data = (static_model(**normalized_line), arg, name) else: data = (normalized_line, arg, name) if chunk_size > 1: buffer.append(data) if len(buffer) == chunk_size: yield buffer buffer = [] else: yield data if chunk_size > 1: yield buffer if not file: arg.close() def _parse(feeds, model=False): data = { 'agency.txt': [], 'stops.txt': [], 'routes.txt': [], 'trips.txt': [], 
'stop_times.txt': [], 'calendar.txt': [], 'calendar_dates.txt': [], 'fare_attributes.txt': [], 'fare_rules.txt': [], 'shapes.txt': [], 'frequencies.txt': [], 'transfers.txt': [], 'feed_info.txt': [], } for feed in feeds: for file, reader in feed.items(): static_model = FILENAME_MODEL_MAPPING[file] reader.fieldnames = normalize_names(static_model, reader.fieldnames) if model: data[file] += [static_model(**normalize_data(static_model, x)) for x in reader] else: data[file] += [normalize_data(static_model, x) for x in reader] return data def normalize_names(model, raw_data): transforms = model.NAME_MAPPING return [ transforms.get(name, None) or name.replace(model.PREFIX, '') for name in raw_data ] def normalize_data(model, raw_data): transforms = model.DATA_MAPPING return { k: v if not transforms.get(k) else transforms[k](v) for k, v in raw_data.items() }
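# Usage sketch for load_iter() defined above; the feed path below is a
# placeholder and must point at a real GTFS zip for this to run.
if __name__ == "__main__":
    for record, source, filename in load_iter("path/to/gtfs_feed.zip"):
        print(filename, record)  # one normalized row at a time
        break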
py
1a3adf741e497113c9b7ca6bf692b8f84b0190c1
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from ._enums import * __all__ = ['FileShare'] class FileShare(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, access_tier: Optional[pulumi.Input[Union[str, 'ShareAccessTier']]] = None, account_name: Optional[pulumi.Input[str]] = None, enabled_protocols: Optional[pulumi.Input[Union[str, 'EnabledProtocols']]] = None, expand: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, root_squash: Optional[pulumi.Input[Union[str, 'RootSquashType']]] = None, share_name: Optional[pulumi.Input[str]] = None, share_quota: Optional[pulumi.Input[int]] = None, __props__=None, __name__=None, __opts__=None): """ Properties of the file share, including Id, resource name, resource type, Etag. API Version: 2021-01-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Union[str, 'ShareAccessTier']] access_tier: Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. FileStorage account can choose Premium. :param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. :param pulumi.Input[Union[str, 'EnabledProtocols']] enabled_protocols: The authentication protocol that is used for the file share. Can only be specified when creating a share. :param pulumi.Input[str] expand: Optional, used to create a snapshot. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: A name-value pair to associate with the share as metadata. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :param pulumi.Input[Union[str, 'RootSquashType']] root_squash: The property is for NFS share only. The default is NoRootSquash. :param pulumi.Input[str] share_name: The name of the file share within the specified storage account. File share names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter or number. :param pulumi.Input[int] share_quota: The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). For Large File Shares, the maximum size is 102400. 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['access_tier'] = access_tier if account_name is None and not opts.urn: raise TypeError("Missing required property 'account_name'") __props__['account_name'] = account_name __props__['enabled_protocols'] = enabled_protocols __props__['expand'] = expand __props__['metadata'] = metadata if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['root_squash'] = root_squash __props__['share_name'] = share_name __props__['share_quota'] = share_quota __props__['access_tier_change_time'] = None __props__['access_tier_status'] = None __props__['deleted'] = None __props__['deleted_time'] = None __props__['etag'] = None __props__['last_modified_time'] = None __props__['name'] = None __props__['remaining_retention_days'] = None __props__['share_usage_bytes'] = None __props__['snapshot_time'] = None __props__['type'] = None __props__['version'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storage:FileShare"), pulumi.Alias(type_="azure-native:storage/latest:FileShare"), pulumi.Alias(type_="azure-nextgen:storage/latest:FileShare"), pulumi.Alias(type_="azure-native:storage/v20190401:FileShare"), pulumi.Alias(type_="azure-nextgen:storage/v20190401:FileShare"), pulumi.Alias(type_="azure-native:storage/v20190601:FileShare"), pulumi.Alias(type_="azure-nextgen:storage/v20190601:FileShare"), pulumi.Alias(type_="azure-native:storage/v20200801preview:FileShare"), pulumi.Alias(type_="azure-nextgen:storage/v20200801preview:FileShare"), pulumi.Alias(type_="azure-native:storage/v20210101:FileShare"), pulumi.Alias(type_="azure-nextgen:storage/v20210101:FileShare")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(FileShare, __self__).__init__( 'azure-native:storage:FileShare', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'FileShare': """ Get an existing FileShare resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["access_tier"] = None __props__["access_tier_change_time"] = None __props__["access_tier_status"] = None __props__["deleted"] = None __props__["deleted_time"] = None __props__["enabled_protocols"] = None __props__["etag"] = None __props__["last_modified_time"] = None __props__["metadata"] = None __props__["name"] = None __props__["remaining_retention_days"] = None __props__["root_squash"] = None __props__["share_quota"] = None __props__["share_usage_bytes"] = None __props__["snapshot_time"] = None __props__["type"] = None __props__["version"] = None return FileShare(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="accessTier") def access_tier(self) -> pulumi.Output[Optional[str]]: """ Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. FileStorage account can choose Premium. """ return pulumi.get(self, "access_tier") @property @pulumi.getter(name="accessTierChangeTime") def access_tier_change_time(self) -> pulumi.Output[str]: """ Indicates the last modification time for share access tier. """ return pulumi.get(self, "access_tier_change_time") @property @pulumi.getter(name="accessTierStatus") def access_tier_status(self) -> pulumi.Output[str]: """ Indicates if there is a pending transition for access tier. """ return pulumi.get(self, "access_tier_status") @property @pulumi.getter def deleted(self) -> pulumi.Output[bool]: """ Indicates whether the share was deleted. """ return pulumi.get(self, "deleted") @property @pulumi.getter(name="deletedTime") def deleted_time(self) -> pulumi.Output[str]: """ The deleted time if the share was deleted. """ return pulumi.get(self, "deleted_time") @property @pulumi.getter(name="enabledProtocols") def enabled_protocols(self) -> pulumi.Output[Optional[str]]: """ The authentication protocol that is used for the file share. Can only be specified when creating a share. """ return pulumi.get(self, "enabled_protocols") @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ Resource Etag. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="lastModifiedTime") def last_modified_time(self) -> pulumi.Output[str]: """ Returns the date and time the share was last modified. """ return pulumi.get(self, "last_modified_time") @property @pulumi.getter def metadata(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ A name-value pair to associate with the share as metadata. """ return pulumi.get(self, "metadata") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="remainingRetentionDays") def remaining_retention_days(self) -> pulumi.Output[int]: """ Remaining retention days for share that was soft deleted. """ return pulumi.get(self, "remaining_retention_days") @property @pulumi.getter(name="rootSquash") def root_squash(self) -> pulumi.Output[Optional[str]]: """ The property is for NFS share only. The default is NoRootSquash. """ return pulumi.get(self, "root_squash") @property @pulumi.getter(name="shareQuota") def share_quota(self) -> pulumi.Output[Optional[int]]: """ The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). For Large File Shares, the maximum size is 102400. 
""" return pulumi.get(self, "share_quota") @property @pulumi.getter(name="shareUsageBytes") def share_usage_bytes(self) -> pulumi.Output[float]: """ The approximate size of the data stored on the share. Note that this value may not include all recently created or recently resized files. """ return pulumi.get(self, "share_usage_bytes") @property @pulumi.getter(name="snapshotTime") def snapshot_time(self) -> pulumi.Output[str]: """ Creation time of share snapshot returned in the response of list shares with expand param "snapshots". """ return pulumi.get(self, "snapshot_time") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") @property @pulumi.getter def version(self) -> pulumi.Output[str]: """ The version of the share. """ return pulumi.get(self, "version") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
py
1a3ae02022d955f63bf60838b36bbc94184f571c
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.    #
#                                                                            #
# Licensed under the Amazon Software License (the "License"). You may not    #
# use this file except in compliance with the License. A copy of the         #
# License is located at                                                      #
#                                                                            #
#     http://aws.amazon.com/asl/                                             #
#                                                                            #
# or in the "license" file accompanying this file. This file is distributed  #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,         #
# express or implied. See the License for the specific language governing    #
# permissions and limitations under the License.                             #
##############################################################################

from botocore.exceptions import ClientError
import boto3
import os
import logging
import base64
import json
import time

REKOGNITION_FACE_SIMILARITY_THRESHOLD = int(os.environ['RekognitionFaceSimilarityThreshold'])
COLLECTION_ID = os.environ['RekognitionCollectionName']
DYNAMODB_TABLE_NAME = os.environ['DynamoDBTableName']
LOG_LEVEL = os.environ['LogLevel']
SEND_ANONYMOUS_DATA = os.environ['SendAnonymousData']

dynamodb = boto3.client('dynamodb')
rekognition = boto3.client('rekognition')

logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)


def generate_response(result, name, similarity):
    return {
        'statusCode': 200,
        'headers': {"Content-Type": "application/json"},
        'body': json.dumps({'result': result, 'name': name, 'similarity': similarity})
    }


def update_item(face_id, similarity):
    ts = int(time.time())
    dynamodb.update_item(
        TableName=DYNAMODB_TABLE_NAME,
        Key={'RekognitionId': {'S': face_id}},
        UpdateExpression="SET GatePassed = :ts",
        ExpressionAttributeValues={':ts': {'S': str(ts)}}
    )


def lambda_handler(event, context):
    # logger.info(event)
    binary_image = base64.b64decode(event['body'])

    try:
        try:
            response = rekognition.search_faces_by_image(
                CollectionId=COLLECTION_ID,
                Image={'Bytes': binary_image},
                FaceMatchThreshold=REKOGNITION_FACE_SIMILARITY_THRESHOLD,
                MaxFaces=1
            )
        except ClientError as err:
            code = err.response['Error']['Code']
            if code in ['ProvisionedThroughputExceededException', 'ThrottlingException']:
                # logger.exception() requires a message argument; log the error itself.
                logger.exception(err)
            elif code in ['InvalidParameterException']:
                logger.info('No face in Rekognition')
            else:
                logger.exception(err)
            return generate_response('INVALID', '', 0)

        face_matches = response['FaceMatches']
        if len(face_matches) > 0:
            face_match = face_matches[0]
            similarity = face_match['Similarity']
            face = face_match['Face']
            face_id = face['FaceId']
            try:
                response = dynamodb.get_item(
                    TableName=DYNAMODB_TABLE_NAME,
                    Key={'RekognitionId': {'S': face_id}}
                )
                name = response['Item']['Name']['S']
                update_item(face_id, similarity)
            except Exception as err:
                logger.exception(err)
                return generate_response('INVALID', '', 0)

            logger.info('Above Rekognition Threshold. Similarity: {}'.format(similarity))
            return generate_response('OK', name, similarity)
        else:
            logger.info('Similar Faces Not Found')
            return generate_response('NO_MATCH', '', 0)

    except Exception as err:
        logger.exception(err)
        return generate_response('INVALID', '', 0)
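
# Hedged invocation sketch (comments only): the module configures itself from
# environment variables and creates boto3 clients at import time, so valid AWS
# credentials and those variables are assumed; the image file name is made up.
#
#     import base64
#     with open("face.jpg", "rb") as f:
#         event = {"body": base64.b64encode(f.read()).decode("utf-8")}
#     print(lambda_handler(event, context=None))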
py
1a3ae09d4bd64c4055f240ed5927678be55077b6
# Copyright 2015 ETH Zurich # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :mod:`testcommon` --- Common test classes/utilities =================================================== """ # Stdlib from unittest.mock import MagicMock # External import nose.tools as ntools # SCION from lib.errors import SCIONBaseError class SCIONTestError(SCIONBaseError): pass def create_mock(attrs=None, class_=None): if attrs is None: attrs = [] if class_: attrs.append("__class__") m = MagicMock(spec_set=attrs) if class_: m.__class__ = class_ for attr in attrs: value = MagicMock(spec_set=[]) if attr == "__class__" and class_: value = class_ setattr(m, attr, value) return m def create_mock_full(kv=None, class_=None, return_value=None, side_effect=None): """ 'kv' is a dict "attr": val - directly sets attr to val. "attr()": val - sets the return value of attr() to val. "attr()...": val - sets the side_effects of attr() to val. """ def base(name): return name.rstrip("().") if not kv: kv = {} attrs = [] for k in kv: attrs.append(base(k)) m = create_mock(attrs, class_=class_) if return_value is not None: m.return_value = return_value if side_effect is not None: m.side_effect = side_effect for k, v in kv.items(): a = base(k) if k.endswith("()..."): f = getattr(m, a) setattr(f, "side_effect", v) elif k.endswith("()"): f = getattr(m, a) setattr(f, "return_value", v) else: setattr(m, a, v) return m def assert_these_calls(mock, calls, any_order=False): mock.assert_has_calls(calls, any_order=any_order) ntools.eq_(len(mock.mock_calls), len(calls)) def assert_these_call_lists(mock, call_lists, any_order=False): calls = [] for x in call_lists: calls.extend(x.call_list()) assert_these_calls(mock, calls, any_order=any_order)
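
# Hedged usage sketch (comments only) of the create_mock_full() key conventions
# described in its docstring; the attribute names below are illustrative.
#
#     conn = create_mock_full({
#         "name": "conn0",       # plain key: sets an attribute
#         "recv()": b"data",     # "()" suffix: sets the return value
#         "poll()...": [1, 2],   # "()..." suffix: sets side effects
#     })
#     conn.name       # -> "conn0"
#     conn.recv()     # -> b"data"
#     conn.poll()     # -> 1, then 2 on the next call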
py
1a3ae0be3bca6b7f5b854c1968fb8d3e089f3e33
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors # For information on the respective copyright owner see the NOTICE file # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..types import XMLBase from .size import Size from .normal import Normal class Plane(XMLBase): _NAME = 'plane' _TYPE = 'sdf' _CHILDREN_CREATORS = dict( size=dict(creator=Size, default=[[1, 1]]), normal=dict(creator=Normal) ) def __init__(self): super(Plane, self).__init__() self.reset() @property def size(self): return self._get_child_element('size') @size.setter def size(self, vec): self._add_child_element('size', vec) @property def normal(self): return self._get_child_element('normal') @normal.setter def normal(self, vec): self._add_child_element('normal', vec)
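
# Hedged usage sketch (kept as comments because the relative imports above only
# resolve inside the parent package): the property setters shown here route the
# values through the child-element creators declared on the class.
#
#     plane = Plane()
#     plane.size = [10, 10]      # becomes the <size> child element
#     plane.normal = [0, 0, 1]   # becomes the <normal> child element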
py
1a3ae1946ad60c45c87e97a1563187c7869f141f
import unittest import os import sys from steembase.account import PrivateKey import steembase.bip38 class Testcases(unittest.TestCase): def test_encrypt(self): self.assertEqual([ format( steembase.bip38.encrypt( PrivateKey( "5HqUkGuo62BfcJU5vNhTXKJRXuUi9QSE6jp8C3uBJ2BVHtB8WSd"), "TestingOneTwoThree"), "encwif"), format( steembase.bip38.encrypt( PrivateKey( "5KN7MzqK5wt2TP1fQCYyHBtDrXdJuXbUzm4A9rKAteGu3Qi5CVR"), "TestingOneTwoThree"), "encwif"), format( steembase.bip38.encrypt( PrivateKey( "5HtasZ6ofTHP6HCwTqTkLDuLQisYPah7aUnSKfC7h4hMUVw2gi5"), "Satoshi"), "encwif") ], [ "6PRN5mjUTtud6fUXbJXezfn6oABoSr6GSLjMbrGXRZxSUcxThxsUW8epQi", "6PRVWUbkzzsbcVac2qwfssoUJAN1Xhrg6bNk8J7Nzm5H7kxEbn2Nh2ZoGg", "6PRNFFkZc2NZ6dJqFfhRoFNMR9Lnyj7dYGrzdgXXVMXcxoKTePPX1dWByq" ]) def test_decrypt(self): self.assertEqual([ format( steembase.bip38.decrypt( "6PRN5mjUTtud6fUXbJXezfn6oABoSr6GSLjMbrGXRZxSUcxTh" "xsUW8epQi", "TestingOneTwoThree"), "wif"), format( steembase.bip38.decrypt( "6PRVWUbkzzsbcVac2qwfssoUJAN1Xhrg6bNk8J7Nzm5H7kxEb" "n2Nh2ZoGg", "TestingOneTwoThree"), "wif"), format( steembase.bip38.decrypt( "6PRNFFkZc2NZ6dJqFfhRoFNMR9Lnyj7dYGrzdgXXVMXcxoKTe" "PPX1dWByq", "Satoshi"), "wif") ], [ "5HqUkGuo62BfcJU5vNhTXKJRXuUi9QSE6jp8C3uBJ2BVHtB8WSd", "5KN7MzqK5wt2TP1fQCYyHBtDrXdJuXbUzm4A9rKAteGu3Qi5CVR", "5HtasZ6ofTHP6HCwTqTkLDuLQisYPah7aUnSKfC7h4hMUVw2gi5" ]) if __name__ == '__main__': unittest.main()
py
1a3ae22213490cdcb188159dded2be60a6ffb783
# Complete list of functional components used by this extension. # # Components can be disabled with build options matching `goost_*_enabled=no`. # A branch of components can be disabled as well, like: `goost_core_enabled=no`. # # NOTE: Components may not necessarily have structural meaning. # components = [ "core/image", "core/math", "scene/physics", "editor", ] def get_components(): comp = set() for n in components: parts = n.split("/") comp.update(parts) comp_list = list(comp) comp_list.sort() return comp_list def get_child_components(parent): comp_list = [] for n in components: parts = n.split("/") if not parent in parts: continue parts.reverse() for p in parts: if p == parent: break comp_list.append(p) return comp_list # # Complete list of all classes currently implemented in the extension, # excluding any classes provided from within `modules/` directory. # # This is used by config.py::get_doc_classes(), and potentially allow to disable # each of the class in the future. # class GoostClass: def __init__(self, name, deps=[]): self.name = name self.deps = [] def add_depencency(self, goost_class): self.deps.append(goost_class) classes = [ "GoostEngine", "GoostGeometry2D", "GoostImage", "GradientTexture2D", "ImageBlender", "ImageIndexed", "InvokeState", "LightTexture", "LinkedList", "ListNode", "PolyBoolean2D", "PolyBooleanParameters2D", "PolyDecomp2D", "PolyDecompParameters2D", "PolyOffset2D", "PolyOffsetParameters2D", "PolyNode2D", "PolyCircle2D", "PolyRectangle2D", "PolyShape2D", "PolyCollisionShape2D", "Random", "Random2D", "ShapeCast2D", "VariantMap", "VariantResource", "VisualShape2D", ] # Convert to dictionary, because we need to instantiate `GoostClass` nodes. _classes = {} for c in classes: _classes[c] = GoostClass(c) classes = _classes # Define dependencies. classes["GoostEngine"].add_depencency(classes["InvokeState"]) classes["GoostGeometry2D"].add_depencency(classes["PolyBoolean2D"]) classes["GoostGeometry2D"].add_depencency(classes["PolyDecomp2D"]) classes["GoostGeometry2D"].add_depencency(classes["PolyOffset2D"]) classes["LightTexture"].add_depencency(classes["GradientTexture2D"]) classes["LinkedList"].add_depencency(classes["ListNode"]) classes["PolyBoolean2D"].add_depencency(classes["PolyBooleanParameters2D"]) classes["PolyBoolean2D"].add_depencency(classes["PolyNode2D"]) classes["PolyDecomp2D"].add_depencency(classes["PolyDecompParameters2D"]) classes["PolyOffset2D"].add_depencency(classes["PolyOffsetParameters2D"]) classes["PolyCircle2D"].add_depencency(classes["PolyNode2D"]) classes["PolyRectangle2D"].add_depencency(classes["PolyNode2D"]) classes["PolyShape2D"].add_depencency(classes["PolyNode2D"]) classes["PolyCollisionShape2D"].add_depencency(classes["PolyNode2D"]) classes["Random2D"].add_depencency(classes["Random"]) def resolve_dependency(goost_class): resolved = set() def resolve(c, r_resolved): for n in c.deps: resolve(n, r_resolved) r_resolved.add(c) resolve(goost_class, resolved) resolved_list = [] for c in resolved: resolved_list.append(c.name) return resolved_list classes_enabled = [] for c in classes: classes_enabled.append(c) classes_disabled = [] try: import custom try: classes_disabled = custom.goost_classes_disabled for c in classes_disabled: if not c in classes: raise NameError("Goost: Requested to disable non-existing class.") classes_enabled.remove(c) except AttributeError: pass except ImportError: pass
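
# Illustrative sketch of how the helpers above compose; it only prints derived
# data, so running this file directly is harmless.
if __name__ == "__main__":
    print("all components:", get_components())
    print("children of 'core':", get_child_components("core"))
    print("'GoostEngine' resolves to:", resolve_dependency(classes["GoostEngine"]))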
py
1a3ae236b0605a89c9462af0e945fe8ca9d06e7d
from typing import Optional, Dict from cmd.package.HBShedPackageHandler import HBShedPackageHandler from cmd.package.modules.Module import Module from cmd.package.modules.ModulesHandler import ModulesHandler class ApplyModulePeerDependencies: def __init__(self, package: HBShedPackageHandler, dependencies: Optional[Dict[str, str]]) -> None: self.__package: HBShedPackageHandler = package self.__dependencies: Optional[Dict[str, str]] = dependencies def __apply(self): self.__package.set_peer_dependencies(self.__dependencies) self.__package.write() def __apply_modules_peer_dependencies(self): if self.__package.config().has_modules(): modules: ModulesHandler = ModulesHandler(self.__package) module: Module for module in modules.modules: ApplyModulePeerDependencies( package=module.package, dependencies=self.__dependencies ).process() def process(self): self.__apply() self.__apply_modules_peer_dependencies()
py
1a3ae265d756d858efb281ec8dab0d807159787b
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A `Bijector` that computes `b(x) = 1. / x`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow as tf from tensorflow_probability.python.bijectors import bijector from tensorflow_probability.python.internal import distribution_util __all__ = ["Reciprocal"] class Reciprocal(bijector.Bijector): """A `Bijector` that computes the reciprocal `b(x) = 1. / x` entrywise. This bijector accepts any non-zero values for both `forward` and `inverse`. #### Examples ```python bijector.Reciprocal().forward(x=[[1., 2.], [4., 5.]]) # Result: [[1., .5], [.25, .2]], i.e., 1 / x bijector.Reciprocal().forward(x=[[0., 2.], [4., 5.]]) # Result: AssertionError, doesn't accept zero. bijector.Square().inverse(y=[[1., 2.], [4., 5.]]) # Result: [[1., .5], [.25, .2]], i.e. 1 / x ``` """ def __init__(self, validate_args=False, name="reciprocal"): """Instantiates the `Reciprocal`. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ self._name = name super(Reciprocal, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) def _forward(self, x): x = self._maybe_assert_valid(x) return 1. / x _inverse = _forward def _forward_log_det_jacobian(self, x): x = self._maybe_assert_valid(x) return -2. * tf.math.log(tf.abs(x)) _inverse_log_det_jacobian = _forward_log_det_jacobian def _maybe_assert_valid(self, t): if not self.validate_args: return t is_valid = tf.compat.v1.assert_none_equal( t, 0., message="All elements must be non-zero.") return distribution_util.with_dependencies([is_valid], t)
py
1a3ae3252be044576368d4d304417f5dfa210bba
#!/usr/bin/env python3
"""Circle helpers: circumference and area with decorator-based input checks."""
from functools import wraps
from math import pi as π
from typing import Union


def checker(func):
    """Decorator to check that the inputs are valid.

    Uses the CheckCircleInputs class to determine if the radius
    input is valid.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        CheckCircleInputs(*args, **kwargs)
        return func(*args, **kwargs)
    return wrapper


@checker
def circle_circumference(radius: Union[int, float]):
    """Calculate the circumference of a circle."""
    return 2 * π * radius


@checker
def circle_area(radius: Union[int, float]):
    """Calculate the area of a circle."""
    return π * (radius**2)


class CheckCircleInputs:
    """Validate the radius passed to the circle helpers."""

    def __init__(self, *args, **kwargs):
        radius = None
        if args:
            radius = args[-1]
        if 'radius' in kwargs:
            radius = kwargs['radius']

        self.type_check(radius)
        self.value_check(radius)

    def type_check(self, radius):
        if type(radius) not in [int, float]:
            raise TypeError(
                f"Radius must be a number. Input was {type(radius)}.")

    def value_check(self, radius):
        if radius < 0:
            raise ValueError(
                f"Radius cannot be negative. Input was {radius}.")


class Circle:
    @checker
    def __init__(self, radius: Union[int, float]):
        self.radius = radius

    def circle_circumference(self):
        return 2 * π * self.radius

    def circle_area(self):
        return π * (self.radius**2)
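
# A short usage sketch of the helpers above; safe to run directly.
if __name__ == "__main__":
    print(circle_circumference(2))    # 4π ≈ 12.566
    print(circle_area(2))             # 4π ≈ 12.566
    print(Circle(1.5).circle_area())  # ≈ 7.069
    try:
        circle_area(-1)               # rejected by the checker decorator
    except ValueError as err:
        print("validation works:", err)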
py
1a3ae46310253b290e9fa40c218a7ce2ed62cc0e
# Copyright (c) 2016 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import six from oslo_log import log as logging from aim.aim_lib.db import model from aim.api import resource from aim import exceptions from aim import utils as aim_utils LOG = logging.getLogger(__name__) class VrfNotVisibleFromExternalNetwork(exceptions.AimException): message = "%(vrf)s is not visible from %(ext_net)s." class L3OutsideVrfChangeDisallowed(exceptions.AimException): message = ("Cannot change VRF referenced by no-NAT L3Out %(l3out)s from " "%(old_vrf)s to %(vrf)s.") @six.add_metaclass(abc.ABCMeta) class NatStrategy(object): """Interface for NAT behavior strategies. Defines interface for configuring L3Outside in AIM to support various kinds of NAT-ing. All methods expect AIM resources as input parameters. Example usage: 1. Decide a NAT-strategy to use mgr = AimManager() ctx = AimContext() ns = DistributedNatStrategy(mgr) # or NoNatEdgeStrategy(mgr), # or EdgeNatStrategy(mgr) 2. Create L3Outside and one or more ExternalNetworks. Subnets may be created in the L3Outside l3out = L3Outside(tenant_name='t1', name='out') ext_net1 = ExternalNetwork(tenant_name='t1', l3out_name='out', name='inet1') ext_net2 = ExternalNetwork(tenant_name='t1', l3out_name='out', name='inet2') ns.create_l3outside(ctx, l3out) ns.create_subnet(ctx, l3out, '40.40.40.1/24') ns.create_external_network(ctx, ext_net1) ns.create_external_network(ctx, ext_net2) 3. Allow traffic for certain IP-addresses through the external networks; by default no traffic is allowed. ns.update_external_cidrs(ctx, ext_net1, ['0.0.0.0/0']) ns.update_external_cidrs(ctx, ext_net2, ['200.200.0.0/16', '300.0.0.0/8']) 4. To provide external-connectivity to a VRF, connect the VRF to ExternalNetwork with appropriate contracts. ext_net1.provided_contract_names = ['http', 'icmp'] ext_net1.consumed_contract_names = ['arp'] vrf = VRF(...) ns.connect_vrf(ctx, ext_net1, vrf) 5. Call connect_vrf() again to update the contracts ext_net1.provided_contract_names = ['http', 'https'] ext_net1.consumed_contract_names = ['ping'] ns.connect_vrf(ctx, ext_net1, vrf) 6. Disallow external-connectivity to VRF ns.disconnect_vrf(ctx, ext_net1, vrf) 7. Delete ExternalNetwork, subnet and L3Outside ns.delete_external_network(ctx, ext_net1) ns.delete_external_network(ctx, ext_net2) ns.delete_subnet(ctx, l3out, '40.40.40.1/24') ns.delete_l3outside(ctx, l3out) """ @abc.abstractmethod def create_l3outside(self, ctx, l3outside, vmm_domains=None, phys_domains=None): """Create L3Outside object if needed. :param ctx: AIM context :param l3outside: L3Outside AIM resource :return: L3Outside resource """ @abc.abstractmethod def delete_l3outside(self, ctx, l3outside): """Delete L3Outside object. :param ctx: AIM context :param l3outside: L3Outside AIM resource :return: """ @abc.abstractmethod def get_l3outside_resources(self, ctx, l3outside): """Get AIM resources that are created for an L3Outside object. 
:param ctx: AIM context :param l3outside: L3Outside AIM resource :return: List of AIm resources """ @abc.abstractmethod def create_subnet(self, ctx, l3outside, gw_ip_mask): """Create Subnet in L3Outside. :param ctx: AIM context :param l3outside: L3Outside AIM resource :param gw_ip_mask: Gateway+CIDR of subnet to create :return: """ @abc.abstractmethod def delete_subnet(self, ctx, l3outside, gw_ip_mask): """Delete Subnet in L3Outside. :param ctx: AIM context :param l3outside: L3Outside AIM resource :param gw_ip_mask: Gateway+CIDR of subnet to delete :return: """ @abc.abstractmethod def get_subnet(self, ctx, l3outside, gw_ip_mask): """Get Subnet in L3Outside with specified Gateway+CIDR. :param ctx: AIM context :param l3outside: L3Outside AIM resource :param gw_ip_mask: Gateway+CIDR of subnet to fetch :return: AIM Subnet if one is found """ @abc.abstractmethod def create_external_network(self, ctx, external_network): """Create ExternalNetwork object if needed. :param ctx: AIM context :param external_network: ExternalNetwork AIM resource :return: ExternalNetwork resource """ @abc.abstractmethod def delete_external_network(self, ctx, external_network): """Delete ExternalNetwork object. :param ctx: AIM context :param external_network: ExternalNetwork AIM resource :return: """ @abc.abstractmethod def update_external_cidrs(self, ctx, external_network, external_cidrs): """Set the IP addresses for which external traffic is allowed. :param ctx: AIM context :param external_network: ExternalNetwork AIM resource :param external_cidrs: List of CIDRs to allow :return: """ @abc.abstractmethod def connect_vrf(self, ctx, external_network, vrf): """Allow external connectivity to VRF. Create or update NAT machinery to allow external connectivity from a given VRF to an ExternalNetwork (L3Outside) enforcing the policies specified in ExternalNetwork. :param ctx: AIM context :param external_network: AIM ExternalNetwork :param vrf: AIM VRF :return: """ @abc.abstractmethod def disconnect_vrf(self, ctx, external_network, vrf): """Remove external connectivity for VRF. Tear down connectivity between VRF and ExternalNetwork (L3Outside). :param ctx: AIM context :param external_network: AIM ExternalNetwork :param vrf: AIM VRF """ @abc.abstractmethod def read_vrfs(self, ctx, external_network): """Read external connectivity VRFs. :param ctx: AIM context :param external_network: AIM ExternalNetwork """ @abc.abstractmethod def set_bd_l3out(self, ctx, bridge_domain, l3outside): """Add the l3out to the BD's associated l3out list if needed. Right now only NoNat needs to do this. :param ctx: AIM context :param bridge_domain: BridgeDomain AIM resource :param l3outside: L3Outside AIM resource """ @abc.abstractmethod def unset_bd_l3out(self, ctx, bridge_domain, l3outside): """Remove the l3out from the BD's associated l3out list if needed. Right now only NoNat needs to do this. 
:param ctx: AIM context :param bridge_domain: BridgeDomain AIM resource :param l3outside: L3Outside AIM resource """ class NatStrategyMixin(NatStrategy): """Implements common functionality between different NAT strategies.""" def __init__(self, mgr): self.mgr = mgr self.db = model.CloneL3OutManager() def create_l3outside(self, ctx, l3outside, vmm_domains=None, phys_domains=None): return self._create_l3out(ctx, l3outside, vmm_domains=vmm_domains, phys_domains=phys_domains) def delete_l3outside(self, ctx, l3outside): self._delete_l3out(ctx, l3outside) def get_l3outside_resources(self, ctx, l3outside): res = [] l3out = self.mgr.get(ctx, l3outside) if l3out: res.append(l3out) for obj in self._get_nat_objects(ctx, l3out): obj_db = self.mgr.get(ctx, obj) if obj_db: res.append(obj_db) ext_vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name) if ext_vrf: res.append(ext_vrf) return res def create_external_network(self, ctx, external_network): return self._create_ext_net(ctx, external_network) def delete_external_network(self, ctx, external_network): self._delete_ext_net(ctx, external_network) def create_subnet(self, ctx, l3outside, gw_ip_mask): l3outside = self.mgr.get(ctx, l3outside) if l3outside: nat_bd = self._get_nat_bd(ctx, l3outside) sub = resource.Subnet(tenant_name=nat_bd.tenant_name, bd_name=nat_bd.name, gw_ip_mask=gw_ip_mask) if not self.mgr.get(ctx, sub): self.mgr.create(ctx, sub) def delete_subnet(self, ctx, l3outside, gw_ip_mask): l3outside = self.mgr.get(ctx, l3outside) if l3outside: nat_bd = self._get_nat_bd(ctx, l3outside) sub = resource.Subnet(tenant_name=nat_bd.tenant_name, bd_name=nat_bd.name, gw_ip_mask=gw_ip_mask) self.mgr.delete(ctx, sub) def get_subnet(self, ctx, l3outside, gw_ip_mask): l3outside = self.mgr.get(ctx, l3outside) if l3outside: nat_bd = self._get_nat_bd(ctx, l3outside) sub = resource.Subnet(tenant_name=nat_bd.tenant_name, bd_name=nat_bd.name, gw_ip_mask=gw_ip_mask) return self.mgr.get(ctx, sub) def update_external_cidrs(self, ctx, external_network, external_cidrs): ext_net_db = self.mgr.get(ctx, external_network) if ext_net_db: self._manage_external_subnets(ctx, ext_net_db, external_cidrs) # This is only needed for NoNat def set_bd_l3out(self, ctx, bridge_domain, l3outside): pass # This is only needed for NoNat def unset_bd_l3out(self, ctx, bridge_domain, l3outside): pass def _create_l3out(self, ctx, l3out, vmm_domains=None, phys_domains=None): """Create NAT EPG etc. in addition to creating L3Out.""" with ctx.store.begin(subtransactions=True): tenant = resource.Tenant(name=l3out.tenant_name) if not self.mgr.get(ctx, tenant): self.mgr.create(ctx, tenant) l3out_db = self.mgr.get(ctx, l3out) if not l3out_db: ext_vrf = self._get_nat_vrf(ctx, l3out) if not self.mgr.get(ctx, ext_vrf): self.mgr.create(ctx, ext_vrf) l3out_db = copy.copy(l3out) l3out_db.vrf_name = ext_vrf.name l3out_db = self.mgr.create(ctx, l3out_db) self._create_nat_epg(ctx, l3out_db, vmm_domains=vmm_domains, phys_domains=phys_domains) return l3out_db def _delete_l3out(self, ctx, l3out, delete_epg=True): """Delete NAT EPG etc. 
in addition to deleting L3Out.""" with ctx.store.begin(subtransactions=True): l3out_db = self.mgr.get(ctx, l3out) if l3out_db: for en in self.mgr.find(ctx, resource.ExternalNetwork, tenant_name=l3out.tenant_name, l3out_name=l3out.name): self.delete_external_network(ctx, en) if not l3out_db.monitored: self.mgr.delete(ctx, l3out) if delete_epg: self._delete_nat_epg(ctx, l3out_db) # delete NAT VRF if any self.mgr.delete(ctx, self._get_nat_vrf(ctx, l3out_db)) def _create_ext_net(self, ctx, ext_net): with ctx.store.begin(subtransactions=True): ext_net_db = self.mgr.get(ctx, ext_net) if not ext_net_db: ext_net_db = self.mgr.create(ctx, ext_net) l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net)) contract = self._get_nat_contract(ctx, l3out) ext_net_db = self._update_contract(ctx, ext_net_db, contract, is_remove=False) return ext_net_db def _delete_ext_net(self, ctx, ext_net): with ctx.store.begin(subtransactions=True): ext_net_db = self.mgr.get(ctx, ext_net) if ext_net_db: self._manage_external_subnets(ctx, ext_net_db, []) if not ext_net_db.monitored: self.mgr.delete(ctx, ext_net) else: l3out = self.mgr.get( ctx, self._ext_net_to_l3out(ext_net)) contract = self._get_nat_contract(ctx, l3out) self._update_contract(ctx, ext_net_db, contract, is_remove=True) def _manage_external_subnets(self, ctx, ext_net, new_cidrs): new_cidrs = new_cidrs[:] if new_cidrs else [] ext_sub_attr = dict(tenant_name=ext_net.tenant_name, l3out_name=ext_net.l3out_name, external_network_name=ext_net.name) old_ext_subs = self.mgr.find(ctx, resource.ExternalSubnet, **ext_sub_attr) with ctx.store.begin(subtransactions=True): for sub in old_ext_subs: if sub.cidr in new_cidrs: new_cidrs.remove(sub.cidr) else: self.mgr.delete(ctx, sub) for c in new_cidrs: self.mgr.create(ctx, resource.ExternalSubnet(cidr=c, **ext_sub_attr)) def _ext_net_to_l3out(self, ext_net): return resource.L3Outside(tenant_name=ext_net.tenant_name, name=ext_net.l3out_name) def _display_name(self, res): return (getattr(res, 'display_name', None) or res.name) def _get_nat_ap_epg(self, ctx, l3out): d_name = self._display_name(l3out) ap_name = getattr(self, 'app_profile_name', None) or l3out.name ap_name = self._scope_name_if_common(l3out.tenant_name, ap_name) ap_display_name = aim_utils.sanitize_display_name(ap_name or d_name) ap = resource.ApplicationProfile( tenant_name=l3out.tenant_name, name=ap_name, display_name=ap_display_name) epg = resource.EndpointGroup( tenant_name=ap.tenant_name, app_profile_name=ap.name, name='EXT-%s' % l3out.name, display_name=aim_utils.sanitize_display_name('EXT-%s' % d_name)) return (ap, epg) def _get_nat_contract(self, ctx, l3out): d_name = self._display_name(l3out) contract_name = self._scope_name_if_common(l3out.tenant_name, 'EXT-%s' % l3out.name) return resource.Contract( tenant_name=l3out.tenant_name, name=contract_name, display_name=self._scope_name_if_common( l3out.tenant_name, aim_utils.sanitize_display_name('EXT-%s' % d_name))) def _get_nat_bd(self, ctx, l3out): d_name = self._display_name(l3out) bd_name = self._scope_name_if_common(l3out.tenant_name, 'EXT-%s' % l3out.name) return resource.BridgeDomain( tenant_name=l3out.tenant_name, name=bd_name, display_name=self._scope_name_if_common( l3out.tenant_name, aim_utils.sanitize_display_name('EXT-%s' % d_name)), limit_ip_learn_to_subnets=True, l3out_names=[l3out.name]) def _get_nat_vrf(self, ctx, l3out): d_name = self._display_name(l3out) vrf_name = self._scope_name_if_common(l3out.tenant_name, 'EXT-%s' % l3out.name) return resource.VRF( tenant_name=l3out.tenant_name, 
name=vrf_name, display_name=self._scope_name_if_common( l3out.tenant_name, aim_utils.sanitize_display_name('EXT-%s' % d_name))) def _get_nat_objects(self, ctx, l3out): sani = aim_utils.sanitize_display_name scope = self._scope_name_if_common d_name = self._display_name(l3out) filter_name = scope(l3out.tenant_name, 'EXT-%s' % l3out.name) fltr = resource.Filter( tenant_name=l3out.tenant_name, name=filter_name, display_name=sani(scope(l3out.tenant_name, 'EXT-%s' % d_name))) entry = resource.FilterEntry( tenant_name=fltr.tenant_name, filter_name=fltr.name, name='Any', display_name='Any') contract = self._get_nat_contract(ctx, l3out) subject = resource.ContractSubject( tenant_name=contract.tenant_name, contract_name=contract.name, name='Allow', display_name='Allow') subject_filter = resource.ContractSubjFilter( tenant_name=contract.tenant_name, contract_name=contract.name, contract_subject_name='Allow', filter_name=fltr.name) bd = self._get_nat_bd(ctx, l3out) bd.vrf_name = l3out.vrf_name ap, epg = self._get_nat_ap_epg(ctx, l3out) vm_doms = getattr( self, 'vmm_domains', [{'type': d.type, 'name': d.name} for d in self.mgr.find(ctx, resource.VMMDomain)]) phy_doms = getattr( self, 'physical_domains', [{'name': d.name} for d in self.mgr.find(ctx, resource.PhysicalDomain)]) epg.bd_name = bd.name epg.provided_contract_names = [contract.name] epg.consumed_contract_names = [contract.name] epg.vmm_domains = vm_doms epg.physical_domains = phy_doms return [fltr, entry, contract, subject, subject_filter, bd, ap, epg] def _select_domains(self, objs, vmm_domains=None, phys_domains=None): for obj in objs: if isinstance(obj, resource.EndpointGroup): if vmm_domains is not None: obj.vmm_domains = vmm_domains if phys_domains is not None: obj.physical_domains = phys_domains def _create_nat_epg(self, ctx, l3out, vmm_domains=None, phys_domains=None): objs = self._get_nat_objects(ctx, l3out) self._select_domains(objs, vmm_domains=vmm_domains, phys_domains=phys_domains) with ctx.store.begin(subtransactions=True): for r in objs: if not self.mgr.get(ctx, r): self.mgr.create(ctx, r) def _delete_nat_epg(self, ctx, l3out): with ctx.store.begin(subtransactions=True): nat_bd = self._get_nat_bd(ctx, l3out) for sub in self.mgr.find(ctx, resource.Subnet, tenant_name=nat_bd.tenant_name, bd_name=nat_bd.name): self.mgr.delete(ctx, sub) for r in reversed(self._get_nat_objects(ctx, l3out)): if isinstance(r, resource.ApplicationProfile): epgs = self.mgr.find(ctx, resource.EndpointGroup, tenant_name=r.tenant_name, app_profile_name=r.name) if epgs: continue self.mgr.delete(ctx, r) def _update_contract(self, ctx, ext_net, contract, is_remove): if is_remove: prov = [c for c in ext_net.provided_contract_names if c != contract.name] cons = [c for c in ext_net.consumed_contract_names if c != contract.name] else: prov = [contract.name] prov.extend(ext_net.provided_contract_names) cons = [contract.name] cons.extend(ext_net.consumed_contract_names) ext_net = self.mgr.update(ctx, ext_net, provided_contract_names=prov, consumed_contract_names=cons) return ext_net def _is_visible(self, target_tenant, from_tenant): return (target_tenant == from_tenant or target_tenant == 'common') def _vrf_by_name(self, ctx, vrf_name, tenant_name_hint): vrfs = self.mgr.find(ctx, resource.VRF, tenant_name=tenant_name_hint, name=vrf_name) if vrfs: return vrfs[0] vrfs = self.mgr.find(ctx, resource.VRF, tenant_name='common', name=vrf_name) if vrfs: return vrfs[0] def _scope_name_if_common(self, tenant_name, name): if tenant_name == 'common': scope = getattr(self, 
'common_scope', None) scope = scope + '_' if scope else '' return aim_utils.sanitize_display_name(scope + name) return name class NoNatStrategy(NatStrategyMixin): """No NAT Strategy. Provides direct external connectivity without any network address translation. """ def __init__(self, mgr): super(NoNatStrategy, self).__init__(mgr) def delete_external_network(self, ctx, external_network): """Clean-up any connected VRFs before deleting the external network.""" with ctx.store.begin(subtransactions=True): ext_net = self.mgr.get(ctx, external_network) if not ext_net: return l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name) if vrf: self._disconnect_vrf_from_l3out(ctx, l3out, vrf) self._delete_ext_net(ctx, ext_net) def connect_vrf(self, ctx, external_network, vrf): """Allow external connectivity to VRF. Make external_network provide/consume specified contracts. Locate BDs referring to the VRF, and include L3Outside in their l3out_names. """ with ctx.store.begin(subtransactions=True): if not self._is_visible(vrf.tenant_name, external_network.tenant_name): raise VrfNotVisibleFromExternalNetwork( vrf=vrf, ext_net=external_network) ext_net = self.mgr.get(ctx, external_network) if not ext_net: return l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) old_vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name) if not old_vrf or old_vrf.identity != vrf.identity: LOG.error('connect_vrf: cannot change VRF connected to ' 'no-NAT L3Outside %s', l3out) raise L3OutsideVrfChangeDisallowed(l3out=l3out, old_vrf=old_vrf, vrf=vrf) nat_bd = self._get_nat_bd(ctx, l3out) self._set_bd_l3out(ctx, l3out, vrf, exclude_bd=nat_bd) contract = self._get_nat_contract(ctx, l3out) prov = list(set(external_network.provided_contract_names + [contract.name])) cons = list(set(external_network.consumed_contract_names + [contract.name])) self.mgr.update(ctx, external_network, provided_contract_names=prov, consumed_contract_names=cons) def disconnect_vrf(self, ctx, external_network, vrf): """Remove external connectivity for VRF. Remove contracts provided/consumed by external_network. Locate BDs referring to the VRF, and exclude L3Outside from their l3out_names. 
""" with ctx.store.begin(subtransactions=True): ext_net = self.mgr.get(ctx, external_network) if not ext_net: return l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) old_vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name) if old_vrf and old_vrf.identity != vrf.identity: LOG.info('disconnect_vrf: %s is not connected to %s', ext_net, vrf) return self._disconnect_vrf_from_l3out(ctx, l3out, vrf) contract = self._get_nat_contract(ctx, l3out) self.mgr.update(ctx, external_network, provided_contract_names=[contract.name], consumed_contract_names=[contract.name]) def read_vrfs(self, ctx, external_network): l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name) return [vrf] if vrf else [] def set_bd_l3out(self, ctx, bridge_domain, l3outside): bridge_domain = self.mgr.get(ctx, bridge_domain) if bridge_domain and l3outside.name not in bridge_domain.l3out_names: self.mgr.update( ctx, bridge_domain, l3out_names=bridge_domain.l3out_names + [l3outside.name]) def unset_bd_l3out(self, ctx, bridge_domain, l3outside): bridge_domain = self.mgr.get(ctx, bridge_domain) if bridge_domain and l3outside.name in bridge_domain.l3out_names: bridge_domain.l3out_names.remove(l3outside.name) self.mgr.update(ctx, bridge_domain, l3out_names=bridge_domain.l3out_names) def _get_bds_in_vrf_for_l3out(self, ctx, vrf, l3out): if vrf.tenant_name == 'common' and l3out.tenant_name == 'common': # BDs in all tenants are candidates - locate all BDs whose # vrf_name matches vrf.name, and exclude those that have a # local VRF aliasing the given VRF. all_bds = self.mgr.find(ctx, resource.BridgeDomain, vrf_name=vrf.name) bd_tenants = set([b.tenant_name for b in all_bds]) bd_tenants = [t for t in bd_tenants if not self.mgr.get( ctx, resource.VRF(tenant_name=t, name=vrf.name))] return [b for b in all_bds if b.tenant_name in bd_tenants] elif (vrf.tenant_name == 'common' or vrf.tenant_name == l3out.tenant_name): # VRF and L3out are visible only to BDs in l3out's tenant return self.mgr.find(ctx, resource.BridgeDomain, tenant_name=l3out.tenant_name, vrf_name=vrf.name) # Other combinations of L3Out and VRF are not valid # configurations and can be excluded: # 1. L3out in common, VRF not in common: VRF is not # visible to L3out # 2. L3Out and VRF are in different non-common tenants: # VRF is not visible to L3out return [] def _set_bd_l3out(self, ctx, l3outside, vrf, exclude_bd=None): # update all the BDs for bd in self._get_bds_in_vrf_for_l3out(ctx, vrf, l3outside): if exclude_bd and exclude_bd.identity == bd.identity: continue # Add L3Out to existing list if l3outside.name not in bd.l3out_names: self.mgr.update(ctx, bd, l3out_names=bd.l3out_names + [l3outside.name]) def _unset_bd_l3out(self, ctx, l3outside, vrf, exclude_bd=None): # update all the BDs for bd in self._get_bds_in_vrf_for_l3out(ctx, vrf, l3outside): if exclude_bd and exclude_bd.identity == bd.identity: continue # Remove L3Out from existing list if l3outside.name in bd.l3out_names: bd.l3out_names.remove(l3outside.name) self.mgr.update(ctx, bd, l3out_names=bd.l3out_names) def _disconnect_vrf_from_l3out(self, ctx, l3outside, vrf): nat_bd = self._get_nat_bd(ctx, l3outside) self._unset_bd_l3out(ctx, l3outside, vrf, exclude_bd=nat_bd) class DistributedNatStrategy(NatStrategyMixin): """Distributed NAT Strategy. Provides external connectivity with network address translation (DNAT/SNAT) where the translation is distributed amongst nodes in the fabric. 
""" def delete_external_network(self, ctx, external_network): """Delete external-network from main and cloned L3Outs. """ with ctx.store.begin(subtransactions=True): # Delete specified external-network from all cloned L3Outs. # Delete external-network from main L3Out. l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) ext_net_db = self.mgr.get(ctx, external_network) if l3out and ext_net_db: clone_l3outs = self._find_l3out_clones(ctx, l3out) for clone in clone_l3outs: clone_ext_net = resource.ExternalNetwork( tenant_name=clone.tenant_name, l3out_name=clone.name, name=ext_net_db.name) self._delete_ext_net(ctx, clone_ext_net) self._delete_unused_l3out(ctx, clone) self._delete_ext_net(ctx, ext_net_db) def update_external_cidrs(self, ctx, external_network, external_cidrs): """Update external CIDRs in main and cloned ExternalNetworks.""" l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) ext_net_db = self.mgr.get(ctx, external_network) if l3out and ext_net_db: clone_l3outs = self._find_l3out_clones(ctx, l3out) with ctx.store.begin(subtransactions=True): for clone in clone_l3outs: clone_ext_net = resource.ExternalNetwork( tenant_name=clone.tenant_name, l3out_name=clone.name, name=external_network.name) self._manage_external_subnets(ctx, clone_ext_net, external_cidrs) self._manage_external_subnets(ctx, ext_net_db, external_cidrs) def connect_vrf(self, ctx, external_network, vrf): """Allow external connectivity to VRF. Create shadow L3Outside for L3Outside-VRF combination in VRF's tenant, if required. Create ExternalNetwork and ExternalSubnet(s) in the shadow L3Out, if required. Set vrf_name of shadow L3Outside to VRF. """ with ctx.store.begin(subtransactions=True): return self._create_shadow(ctx, external_network, vrf) def disconnect_vrf(self, ctx, external_network, vrf): """Remove external connectivity for VRF. Delete ExternalNetwork and contained ExternalSubnet in the shadow L3Outside. Remove shadow L3Outside if there are no more ExternalNetworks in the shadow L3Outside. 
""" with ctx.store.begin(subtransactions=True): self._delete_shadow(ctx, external_network, vrf) def read_vrfs(self, ctx, external_network): l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network)) result = [] for c in self.db.get_clones(ctx, l3out): l3c = self.mgr.get(ctx, resource.L3Outside(tenant_name=c[0], name=c[1])) if l3c: vrf = self.mgr.get( ctx, resource.VRF(tenant_name=l3c.tenant_name, name=l3c.vrf_name)) if vrf: result.append(vrf) return result def _generate_l3out_name(self, l3outside, vrf): # Generate a name based on its relationship with VRF name = '%s-%s' % (l3outside.name, vrf.name) display_name = aim_utils.sanitize_display_name( '%s-%s' % (self._display_name(l3outside), self._display_name(vrf))) return (name, display_name) def _make_l3out_clone(self, ctx, l3out, vrf): new_tenant = vrf.tenant_name new_name, new_display_name = self._generate_l3out_name(l3out, vrf) clone_l3out = resource.L3Outside( tenant_name=new_tenant, name=new_name, display_name=new_display_name, vrf_name=vrf.name) return clone_l3out def _create_shadow(self, ctx, ext_net, vrf, with_nat_epg=True): """Clone ExternalNetwork as a shadow.""" ext_net_db = self.mgr.get(ctx, ext_net) if not ext_net_db: return l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net_db)) clone_l3out = self._make_l3out_clone(ctx, l3out, vrf) clone_ext_net = resource.ExternalNetwork( tenant_name=clone_l3out.tenant_name, l3out_name=clone_l3out.name, display_name=ext_net_db.display_name, **{k: getattr(ext_net, k) for k in ['name', 'provided_contract_names', 'consumed_contract_names']}) if with_nat_epg: _, nat_epg = self._get_nat_ap_epg(ctx, l3out) clone_ext_net.nat_epg_dn = nat_epg.dn with ctx.store.begin(subtransactions=True): self.mgr.create(ctx, clone_l3out, overwrite=True) self.mgr.create(ctx, clone_ext_net, overwrite=True) cidrs = self.mgr.find(ctx, resource.ExternalSubnet, tenant_name=ext_net_db.tenant_name, l3out_name=ext_net_db.l3out_name, external_network_name=ext_net_db.name) cidrs = [c.cidr for c in cidrs] self._manage_external_subnets(ctx, clone_ext_net, cidrs) # Set this item as a clone if not self.db.get(ctx, clone_l3out): self.db.set(ctx, l3out, clone_l3out) return clone_ext_net def _delete_shadow(self, ctx, ext_net, vrf): l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net)) clone_l3out = resource.L3Outside( tenant_name=vrf.tenant_name, name=self._generate_l3out_name(l3out, vrf)[0]) clone_ext_net = resource.ExternalNetwork( tenant_name=clone_l3out.tenant_name, l3out_name=clone_l3out.name, name=ext_net.name) with ctx.store.begin(subtransactions=True): self._delete_ext_net(ctx, clone_ext_net) self._delete_unused_l3out(ctx, clone_l3out) def _find_l3out_clones(self, ctx, l3outside): clone_keys = self.db.get_clones(ctx, l3outside) return [resource.L3Outside(tenant_name=x[0], name=x[1]) for x in clone_keys] def _delete_unused_l3out(self, ctx, l3out): ens = self.mgr.find(ctx, resource.ExternalNetwork, tenant_name=l3out.tenant_name, l3out_name=l3out.name) if not ens: self._delete_l3out(ctx, l3out, delete_epg=False) class EdgeNatStrategy(DistributedNatStrategy): """Edge NAT Strategy. Provides external connectivity with network address translation (DNAT/SNAT) where the translation is centralized in a node at the edge of the fabric. """ def connect_vrf(self, ctx, external_network, vrf, external_cidrs=None): """Allow external connectivity to VRF. Create shadow L3Outside for L3Outside-VRF combination in VRF's tenant, if required. Create ExternalNetwork and ExternalSubnet in the shadow L3Out, if required. 
Set vrf_name of shadow L3Outside to VRF. """ with ctx.store.begin(subtransactions=True): return self._create_shadow(ctx, external_network, vrf, with_nat_epg=False) def _make_l3out_clone(self, ctx, l3out, vrf): clone_l3out = super(EdgeNatStrategy, self)._make_l3out_clone( ctx, l3out, vrf) # TODO(amitbose) modify the clone_l3out node-profile etc return clone_l3out
py
1a3ae61e8034343a8780cf14547adb105e6c4aea
from lettuceUI import LettuceUI

__all__ = ['LettuceUI']
py
1a3ae670a1ccab783f980fcec16a4f0ccab3f348
#!/usr/bin/python # # Copyright 2018-2022 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import click from polyaxon import settings from polyaxon.api import POLYAXON_CLOUD_HOST from polyaxon.logger import clean_outputs from polyaxon.utils.formatting import Printer from polyaxon.utils.http_utils import clean_host def get_dashboard_url( base: str = "ui", subpath: str = "", use_cloud: bool = False ) -> str: host = POLYAXON_CLOUD_HOST if use_cloud else clean_host(settings.CLIENT_CONFIG.host) dashboard_url = "{}/{}/".format(host, base) if subpath: return "{}{}/".format(dashboard_url, subpath.rstrip("/")) return dashboard_url def get_dashboard(dashboard_url: str, url_only: bool, yes: bool): if url_only: Printer.print_header("The dashboard is available at: {}".format(dashboard_url)) sys.exit(0) if yes or click.confirm( "Dashboard page will now open in your browser. Continue?", default=True, ): click.launch(dashboard_url) @click.command() @click.option( "--yes", "-y", is_flag=True, default=False, help="Automatic yes to prompts. " 'Assume "yes" as answer to all prompts and run non-interactively.', ) @click.option( "--url", is_flag=True, default=False, help="Print the url of the dashboard." ) @clean_outputs def dashboard(yes, url): """Open dashboard in browser.""" get_dashboard(dashboard_url=get_dashboard_url(), url_only=url, yes=yes)
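
# Hedged sketch (comments only) of the URL shapes produced by the helper above;
# the host value is an assumption, since it really comes from
# settings.CLIENT_CONFIG.host.
#
#     # With a configured host of "https://cloud.acme.io":
#     get_dashboard_url()                       # -> "https://cloud.acme.io/ui/"
#     get_dashboard_url(subpath="runs/abc123")  # -> "https://cloud.acme.io/ui/runs/abc123/"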
py
1a3ae743b6b253fcadee460f956c1b67b89fa0ef
# -*- coding: utf-8 -*- """ Created on Thu Jun 20 22:04:17 2019 @author: Jiupeng """ #Modified from Jiupeng Hu #from __future__ import print_function from collections import defaultdict import os import sys import datetime def decdeg2dms(dd): deg, mnt = divmod(dd*60.0, 60) return int(deg), mnt def gen_sta_hypo(stationin): output = 'station.dat' g = open(output,'w') with open(stationin, 'r') as fr: for line in fr.readlines(): line = line.strip().split() latD, latM = decdeg2dms(float(line[1])) lonD, lonM = decdeg2dms(float(line[0])) for channel in ['001', '002', '003']: g.write('{:<5s} {:2s} {:3s} {:3d} {:7.4f} {:3d} {:7.4f}E{:4d}\n'.format(line[3], line[2], channel ,latD, latM, lonD, lonM, int(float(line[5])*1000))) #g.write('{:<5s} {:2s} {:3s} {:3d} {:7.4f} {:3d} {:7.4f}E{:4d}\n'.format(line[3], line[2], channel ,latD, latM, lonD, lonM, 0)) class Event(object): def __init__(self,line): eventParts = line.split() # assert len(eventParts) == 10 self.no = eventParts[0] self.year = eventParts[1] self.month = eventParts[2] self.day = eventParts[3] self.stime = eventParts[4] # time string self.dtime = eventParts[5] # time delta to 00:00 self.std = eventParts[6] self.lat = eventParts[7] self.lon = eventParts[8] self.depth = eventParts[9] self.mag = eventParts[10] self.stations = set([]) def setSta(self, sta): self.sta = sta def setPicks(self, stationPicks): self.stationPicks = stationPicks def __repr__(self): return ' '.join([self.year, self.month, self.day, self.stime, self.lat+self.lon]) class Pick(object): def __init__(self, line): phaseParts = line.split() self.net = phaseParts[0] self.staN = phaseParts[1] self.sta = '.'.join([self.net, self.staN]) self.phase = phaseParts[2] self.dtime = phaseParts[3] # time to 00:00 self.ttime = phaseParts[4] # travel time self.pamp = phaseParts[5] # P phase amplitude self.error = phaseParts[6] # travel time errors from taup_time def __str__(self): return self.net+self.sta+self.dtime def __repr__(self): return ' '.join([self.net, self.sta,self.phase,self.ttime, self.dtime, self.pamp, self.error]) def isEqLine(line): if line[19] in ['P','S']: return False else: return True class SeismicReport(object): def __init__(self, eventsFile): self.events = [] self.readEventsFile(eventsFile) def readEventsFile(self, eventsFile): eventNo = 0 stationPicks = [] with open(eventsFile, 'r') as f: line = f.readline() # Process first line particularly while line: # if line[0].isspace(): # Event line start with spaces if isEqLine(line): pickNo = 0 eventNo += 1 eventTemp = Event(line) line = f.readline() break else: line = f.readline() while line: # if line[0].isspace(): if isEqLine(line): if stationPicks: eventTemp.setPicks(stationPicks) self.events.append(eventTemp) pickNo = 0 eventNo += 1 stationPicks = [] eventTemp = Event(line) # elif line.startswith(stationPrompt): # elif line[0].isalpha(): elif not isEqLine(line): pickNo += 1 pickTemp = Pick(line) eventTemp.stations.add(pickTemp.sta) stationPicks.append(pickTemp) else: print('Error!') line = f.readline() eventTemp.setPicks(stationPicks) self.events.append(eventTemp) def makeCatlog(self, phases=['P', 'Pg']): for event in self.events: for pick in event.stationPicks: if pick.phase.strip() in phases: self.show([pick.net, pick.sta, pick.phase, event.year, event.month, event.day, pick.dtime, event.lon, event.lat, event.depth]) print("Eq: ",event.no) def makeHypoPhase(self): # Event format from "Summary header format Y2000" eventFormat="{:4s}{:2s}{:2s}{:02d}{:02d}{:02d}{:02d}{:02d} {:>2d}{:02d}{:>3d}E{:>2d}{:02d}{:>3d}{:02d} 
{:1s}{:>1d}{:02d}" for event in self.events: # if len(event.stations) < 15: # continue hour, minute, second = event.stime.split(':') mag1,mag2 = event.mag.split('.') mag1 = int(mag1) mag2 = int(mag2[0:2]) if float(event.mag) < 0: mag1 = 0 mag2 = 0 sec1 = second[0:2] sec2 = second[3:5] lat1, lat2, lat3 = self.processLatLon(event.lat) lon1, lon2, lon3 = self.processLatLon(event.lon) dep1, dep2 = self.processDep(event.depth) # print("0123456789012345678901234567890123456789012345678901234567890") print(eventFormat.format(event.year,event.month,event.day,int(hour),int(minute),int(sec1),int(sec2),lat1,lat2,lat3,lon1,lon2,lon3,dep1,dep2,'L',mag1,mag2)) otimeStr = event.year+event.month+event.day+" "+event.stime otime = datetime.datetime.strptime(otimeStr, '%Y%m%d %H:%M:%S.%f') baseTime = otime - datetime.timedelta(seconds=otime.second, microseconds=otime.microsecond) tmpPicks = event.stationPicks.copy() for sta in event.stations: p_flag = False p_label = ' ' s_flag = False s_lable = ' ' sta_code = "001" tRes = ' ' pWeight = ' ' pSec = '' sSec ='' sec1 = '' sec2 = '' for pick in tmpPicks[::-1]: if pick.sta == sta: if pick.phase == 'P': p_travel_time = pick.ttime p_flag = True if pick.phase == 'S': s_travel_time = pick.ttime s_flag = True if p_flag: ptime = otime + datetime.timedelta(seconds=float(p_travel_time)) pDelta = ptime - baseTime p_label = 'P' # if(int(pDelta.seconds) > 100): # print('Error') pSec = f'{pDelta.seconds:0>2}' sec1 = f'{int(pDelta.microseconds/10000):0>2}' if s_flag: stime = otime + datetime.timedelta(seconds=float(s_travel_time)) sDelta = stime - baseTime s_lable = 'S' # if(int(sDelta.seconds) > 100): # print('Error') sSec = f'{sDelta.seconds:0>2}' sec2 = f'{int(sDelta.microseconds/10000):0>2}' phaseFormat="{:<5s}{:2s} {:3s} {:1s} {:4d}{:02d}{:02d}{:02d}{:02d}{:>3s}{:2s}{:4s}{:3s}{:>3s}{:2s} {:1s}" print(phaseFormat.format( sta.split('.')[1], sta.split('.')[0], sta_code, p_label, baseTime.year, baseTime.month,baseTime.day,baseTime.hour,baseTime.minute, pSec, sec1, tRes, pWeight, sSec, sec2, s_lable )) print('') # print("0123456789012345678901234567890123456789012345678901234567890") def processLatLon(self, value:float): value1, tmp = divmod(float(value)*60.0, 60) try: value2 = str(tmp).split('.')[0] except: value2 = 0 try: value3 = str(tmp).split('.')[1][0:2] if len(value3) == 1: value3 = int(value3)*10 except: value3 = 0 return int(value1), int(value2), int(value3) def processDep(self, depth): try: dep1 = str(depth).split('.')[0] except: dep1 = 0 try: dep2 = str(depth).split('.')[1][0:2] if len(dep2) == 1: dep2 = int(dep2)*10 except: dep2 = 0 return int(dep1), int(dep2) def show(self, list): outStr = '' for item in list: outStr = outStr + item + ' ' print(outStr) if __name__ == '__main__': # test = SeismicReport('phase_sel_one') if len(sys.argv) != 3: print('mk_input.py phasefile stationfile') sys.exit() gen_sta_hypo(sys.argv[2]) test = SeismicReport(sys.argv[1]) #test.makeCatlog() test.makeHypoPhase()
py
1a3ae879685fa55e7ff0660f22793d537dfebc1d
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ... import opcodes as OperandDef from ..datasource import tensor as astensor from .core import TensorComplexFFTNMixin, validate_fftn, TensorStandardFFTN class TensorIFFTN(TensorStandardFFTN, TensorComplexFFTNMixin): _op_type_ = OperandDef.IFFTN def __init__(self, shape=None, axes=None, norm=None, dtype=None, **kw): super().__init__(_shape=shape, _axes=axes, _norm=norm, _dtype=dtype, **kw) def ifftn(a, s=None, axes=None, norm=None): """ Compute the N-dimensional inverse discrete Fourier Transform. This function computes the inverse of the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional tensor by means of the Fast Fourier Transform (FFT). In other words, ``ifftn(fftn(a)) == a`` to within numerical accuracy. For a description of the definitions and conventions used, see `mt.fft`. The input, analogously to `ifft`, should be ordered in the same way as is returned by `fftn`, i.e. it should have the term for zero frequency in all axes in the low-order corner, the positive frequency terms in the first half of all axes, the term for the Nyquist frequency in the middle of all axes and the negative frequency terms in the second half of all axes, in order of decreasingly negative frequency. Parameters ---------- a : array_like Input tensor, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds to ``n`` for ``ifft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. See notes for issue on `ifft` zero padding. axes : sequence of ints, optional Axes over which to compute the IFFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. Repeated indices in `axes` means that the inverse transform over that axis is performed multiple times. norm : {None, "ortho"}, optional Normalization mode (see `mt.fft`). Default is None. Returns ------- out : complex Tensor The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` or `a`, as explained in the parameters section above. Raises ------ ValueError If `s` and `axes` have different length. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- mt.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. ifft : The one-dimensional inverse FFT. ifft2 : The two-dimensional inverse FFT. ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning of tensor. Notes ----- See `mt.fft` for definitions and conventions used. 
Zero-padding, analogously with `ifft`, is performed by appending zeros to the input along the specified dimension. Although this is the common approach, it might lead to surprising results. If another form of zero padding is desired, it must be performed before `ifftn` is called. Examples -------- >>> import mars.tensor as mt >>> a = mt.eye(4) >>> mt.fft.ifftn(mt.fft.fftn(a, axes=(0,)), axes=(1,)).execute() array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt >>> n = mt.zeros((200,200), dtype=complex) >>> n[60:80, 20:40] = mt.exp(1j*mt.random.uniform(0, 2*mt.pi, (20, 20))) >>> im = mt.fft.ifftn(n).real >>> plt.imshow(im.execute()) <matplotlib.image.AxesImage object at 0x...> >>> plt.show() """ a = astensor(a) axes = validate_fftn(a, s=s, axes=axes, norm=norm) op = TensorIFFTN(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.complex_)) return op(a)
py
1a3aea17f846ffaea3ed95e354a0578136a3d103
#!/usr/bin/python # -*- coding: utf-8 -*- """ This script fixes links that contain common spelling mistakes. This is only possible on wikis that have a template for these misspellings. Command line options: -always:XY instead of asking the user what to do, always perform the same action. For example, XY can be "r0", "u" or "2". Be careful with this option, and check the changes made by the bot. Note that some choices for XY don't make sense and will result in a loop, e.g. "l" or "m". -start:XY goes through all misspellings in the category on your wiki that is defined (to the bot) as the category containing misspelling pages, starting at XY. If the -start argument is not given, it starts at the beginning. -main only check pages in the main namespace, not in the talk, wikipedia, user, etc. namespaces. """ # (C) Daniel Herding, 2007 # (C) Pywikibot team, 2007-2016 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals __version__ = '$Id: 1a3aea17f846ffaea3ed95e354a0578136a3d103 $' # import pywikibot from pywikibot import i18n, pagegenerators from pywikibot.tools import PY2 from scripts.solve_disambiguation import DisambiguationRobot if not PY2: basestring = (str, ) HELP_MSG = """\n mispelling.py does not support site {site}. Help Pywikibot team to provide support for your wiki by submitting a bug to: https://phabricator.wikimedia.org/maniphest/task/create/?projects=pywikibot-core with category containing misspelling pages or a template for these misspellings.\n""" class MisspellingRobot(DisambiguationRobot): """Spelling bot.""" misspellingTemplate = { 'de': ('Falschschreibung', 'Obsolete Schreibung'), } # Optional: if there is a category, one can use the -start # parameter. misspellingCategory = { 'da': u'Omdirigeringer af fejlstavninger', # only contains date redirects at the moment 'de': ('Kategorie:Wikipedia:Falschschreibung', 'Kategorie:Wikipedia:Obsolete Schreibung'), 'en': u'Redirects from misspellings', 'hu': u'Átirányítások hibás névről', 'nl': u'Categorie:Wikipedia:Redirect voor spelfout', } def __init__(self, always, firstPageTitle, main_only): """Constructor.""" super(MisspellingRobot, self).__init__( always, [], True, False, None, False, main_only) self.generator = self.createPageGenerator(firstPageTitle) def createPageGenerator(self, firstPageTitle): """ Generator to retrieve misspelling pages or misspelling redirects. 
@rtype: generator """ mylang = self.site.code if mylang in self.misspellingCategory: categories = self.misspellingCategory[mylang] if isinstance(categories, basestring): categories = (categories, ) generators = ( pagegenerators.CategorizedPageGenerator( pywikibot.Category(self.site, misspellingCategoryTitle), recurse=True, start=firstPageTitle) for misspellingCategoryTitle in categories) elif mylang in self.misspellingTemplate: templates = self.misspellingTemplate[mylang] if isinstance(templates, basestring): templates = (templates, ) generators = ( pagegenerators.ReferringPageGenerator( pywikibot.Page(self.site, misspellingTemplateName, ns=10), onlyTemplateInclusion=True) for misspellingTemplateName in templates) if firstPageTitle: pywikibot.output( u'-start parameter unsupported on this wiki because there ' u'is no category for misspellings.') else: pywikibot.output(HELP_MSG.format(site=self.site)) empty_gen = (i for i in []) return empty_gen generator = pagegenerators.CombinedPageGenerator(generators) preloadingGen = pagegenerators.PreloadingGenerator(generator) return preloadingGen def findAlternatives(self, disambPage): """ Append link target to a list of alternative links. Overrides the DisambiguationRobot method. @return: True if alternate link was appended @rtype: bool or None """ if disambPage.isRedirectPage(): self.alternatives.append(disambPage.getRedirectTarget().title()) return True if self.misspellingTemplate.get(disambPage.site.code) is not None: for template, params in disambPage.templatesWithParams(): if (template.title(withNamespace=False) == self.misspellingTemplate[disambPage.site.code]): # The correct spelling is in the last paramter. correctSpelling = params[-1] # On de.wikipedia, there are some cases where the # misspelling is ambigous, see for example: # https://de.wikipedia.org/wiki/Buthan for match in self.linkR.finditer(correctSpelling): self.alternatives.append(match.group('title')) if not self.alternatives: # There were no links in the parameter, so there is # only one correct spelling. self.alternatives.append(correctSpelling) return True def setSummaryMessage(self, disambPage, *args, **kwargs): """ Setup the summary message. Overrides the DisambiguationRobot method. """ # TODO: setSummaryMessage() in solve_disambiguation now has parameters # new_targets and unlink. Make use of these here. self.comment = i18n.twtranslate(self.site, 'misspelling-fixing', {'page': disambPage.title()}) def main(*args): """ Process command line arguments and invoke bot. If args is an empty list, sys.argv is used. @param args: command line arguments @type args: list of unicode """ # the option that's always selected when the bot wonders what to do with # a link. If it's None, the user is prompted (default behaviour). always = None main_only = False firstPageTitle = None for arg in pywikibot.handle_args(args): arg, sep, value = arg.partition(':') if arg == '-always': always = value elif arg == '-start': firstPageTitle = value or pywikibot.input( 'At which page do you want to start?') elif arg == '-main': main_only = True bot = MisspellingRobot(always, firstPageTitle, main_only) bot.run() if __name__ == "__main__": main()
py
1a3aea2fce3029eee3b00d8c61dbc54f43788e70
# coding=utf8 import unittest from l20n.migrate.changesets import convert_blame_to_changesets class TestBlameToChangesets(unittest.TestCase): def test_convert(self): blame = { 'authors': [ 'A', 'B' ], 'blame': { 'path/one': { 'key1': [0, 1346095921.0], 'key2': [1, 1218121409.0] }, 'path/two': { 'key1': [1, 1440596526.0], 'key3': [0, 1346095921.0] } } } expected = [ { 'author': 'B', 'first_commit': 1218121409.0, 'changes': { ('path/one', 'key2'), ('path/two', 'key1'), } }, { 'author': 'A', 'first_commit': 1346095921.0, 'changes': { ('path/one', 'key1'), ('path/two', 'key3'), } }, ] self.assertEqual( convert_blame_to_changesets(blame), expected )
py
1a3aea401d5d04b7e96b7fc537e821774bba36ef
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import logging from typing import Dict, Iterable, List from .annotated_function_generator import ( AnnotatedFunctionGenerator, FunctionVisitor, ) from .decorator_parser import DecoratorParser from .generator_specifications import DecoratorAnnotationSpecification from .model import FunctionDefinitionModel LOG: logging.Logger = logging.getLogger(__name__) class FreeFunctionWithDecoratorVisitor(FunctionVisitor): def __init__( self, target_decorators: List[DecoratorAnnotationSpecification] ) -> None: super().__init__() self.decorator_parsers: Dict[ DecoratorAnnotationSpecification, DecoratorParser ] = { target_decorator: DecoratorParser(target_decorator.decorator) for target_decorator in target_decorators } def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None: for decorator_specification, parser in self.decorator_parsers.items(): if parser.function_matches_target_decorators(node): self.found_functions[decorator_specification].append(node) def visit_FunctionDef(self, node: ast.FunctionDef) -> None: for decorator_specification, parser in self.decorator_parsers.items(): if parser.function_matches_target_decorators(node): self.found_functions[decorator_specification].append(node) def visit_ClassDef(self, node: ast.ClassDef) -> None: # We only want free functions, so we should stop traversing the # tree once we see a class definition pass class AnnotatedFreeFunctionWithDecoratorGenerator(AnnotatedFunctionGenerator): def _annotate_functions(self, path: str) -> Iterable[FunctionDefinitionModel]: visitor = FreeFunctionWithDecoratorVisitor(self.annotation_specifications) return self._annotate_functions_with_visitor(path, visitor)
py
1a3aec254b8fed6d7abc42ed09b2078d3481c5df
# /usr/bin/env python3.5 # -*- mode: python -*- # ============================================================================= # @@-COPYRIGHT-START-@@ # # Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # SPDX-License-Identifier: BSD-3-Clause # # @@-COPYRIGHT-END-@@ # ============================================================================= """ Utilities for parsing and applying quantsim configurations from json config file """ from abc import ABC, abstractmethod from typing import Dict, List from aimet_common.defs import QuantizationDataType, QuantDtypeBwInfo from aimet_common.connected_graph.operation import Op from aimet_common.graph_pattern_matcher import PatternType from aimet_common.quantsim_config.json_config_importer import JsonConfigImporter, ConfigDictKeys, DefaultsType, \ ParamType, OpTypeType, SupergroupType, ConfigType from aimet_common.utils import AimetLogger logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant) # -------------------------------------------------------------------------------------------------------------------- # Overriding AIMET QuantSim data type and bit-width using supported_kernels specified in target driven config file. # -------------------------------------------------------------------------------------------------------------------- # supported_kernels can be specified at default as well as op level in a given target specific config file # Example rule in the target specific config file is as below: # "supported_kernels": [ # { # "activation": { # "bitwidth": 16, # "dtype": "int" # }, # "param": { # "bitwidth": 16, # "dtype": "int" # } # }, # { # "activation": { # "bitwidth": 16, # "dtype": "float" # }, # "param": { # "bitwidth": 16, # "dtype": "float" # } # } # ] # supported_kernels includes data type and bit-width options for activation and param quantization # applied together as a pair. 
In above rule act and param can be set to [int16, int16] OR [FP16, FP16] # supported_kernels can be used to enforce target driven data type and bit-width during AIMET Quantsim # by setting: ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True # # AIMET Quantsim is created with specific defaults for data type/ bit-width using: # default_data_type default_output_bw and default_param_bw arguments as below : # sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, # config_file='./data/quantsim_config.json', # dummy_input=torch.rand(1, 3, 32, 32), in_place=True, # default_data_type=QuantizationDataType.int, # default_output_bw=8, default_param_bw=8) # Rules for override : # (i) If a given QuantSim default data type and bit-width is found at either the default or op-level # supported_kernels list : override shall NOT be applied. # (ii) AIMET supports overrides ONLY when a lower precision kernel is unavailable. # For example : # a) QuantSim default set to int 8, op level supported_kernels only has FP 16 available --> override supported # b) QuantSim default set to int 8, op level supported_kernels only has int 4 available --> override NOT supported # # -------------------------------------------------------------------------------------------------------------------- # Flag to enforce target configs for data type and bit-width for params and activation. ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX = 0 class SupergroupConfigCallback(ABC): """ Class acting as a callback for when supergroups are found """ def __init__(self): pass @abstractmethod def __call__(self, _, op_list: List[Op]): """ Callback logic """ class OnnxConnectedGraphTypeMapper: """ Class maintaining dictionaries for two way mapping from onnx types to connected graph types """ def __init__(self, type_pairs: List[List[List[str]]]): self._onnx_to_conn_graph_dict = {} self._conn_graph_to_onnx_dict = {} for onnx_types, conn_graph_types in type_pairs: for onnx_type in onnx_types: self._onnx_to_conn_graph_dict[onnx_type] = conn_graph_types for conn_graph_type in conn_graph_types: self._conn_graph_to_onnx_dict[conn_graph_type] = onnx_types def get_conn_graph_type_from_onnx_type(self, onnx_type: str): """ Return connected graph type corresponding to onnx type :param onnx_type: Onnx type to find corresponding connected graph type :return: Connected graph type corresponding to onnx_type """ return self._onnx_to_conn_graph_dict.get(onnx_type) def get_onnx_type_from_conn_graph_type(self, conn_graph_type: str): """ Return onnx type corresponding to connected graph type :param conn_graph_type: Connected graph type to find corresponding onnx type :return: Onnx type corresponding to conn_graph_type """ return self._conn_graph_to_onnx_dict.get(conn_graph_type) class QuantSimConfigurator(ABC): """ Class for parsing and applying quantsim configurations from json config file """ def __init__(self, config_file: str): self._quantsim_configs = JsonConfigImporter.import_json_config_file(config_file) def _set_quantsim_configs(self): """ Apply quantsim configurations to the given model """ self._set_default_configs(self._quantsim_configs[ConfigDictKeys.DEFAULTS]) self._set_param_configs(self._quantsim_configs[ConfigDictKeys.PARAMS]) self._set_op_type_configs(self._quantsim_configs[ConfigDictKeys.OP_TYPE]) self._set_supergroup_configs(self._quantsim_configs[ConfigDictKeys.SUPERGROUPS]) self._set_model_input_configs(self._quantsim_configs[ConfigDictKeys.MODEL_INPUT]) 
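        # Model output settings are the sixth and most specific level in the config file and are applied last.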
self._set_model_output_configs(self._quantsim_configs[ConfigDictKeys.MODEL_OUTPUT]) def check_correctness_of_dtype_bw_rules(self, quantsim_dtype_bw_info: QuantDtypeBwInfo): """ Validates correctness of data type and bitdiwth rules specified using config file supported_kernels option. :param quantsim_dtype_bw_info: data type (int or float) as QuantizationDataType and act/param bit-width info. :return: """ # validation rules: # AIMET supports overrides ONLY when a lower precision kernel is unavailable. # for example : # 1) (default) int 8, but only FP16 kernel is available for a given op type --> override supported # 2) (default) int 8, but only int 4 kernel is available is available for a given op type --> override not supported default_config = self._quantsim_configs[ConfigDictKeys.DEFAULTS] default_valid = False op_level_valid = False # user has provided default supported kernel options if ConfigDictKeys.SUPPORTED_KERNELS in default_config: default_supported_kernels = default_config[ConfigDictKeys.SUPPORTED_KERNELS] # quantsim dtype/bw found in default supported kernels if current_config_in_supported_kernels(quantsim_dtype_bw_info, default_supported_kernels) and \ is_current_config_same_as_override_option(quantsim_dtype_bw_info, default_supported_kernels): default_valid = True # default level override is not required logger.info("Quantsim config found in default supported kernels, " "skipping default level dtype and bitwidth override") else: # override is required, first validate the override option # if valid, update default dtype, bw to be used to validate op level overrides. if is_override_dtype_bw_valid(get_override_from_supported_kernels(default_supported_kernels), quantsim_dtype_bw_info): default_valid = True quantsim_dtype_bw_info = get_override_from_supported_kernels(default_supported_kernels) else: logger.error(' Default supported_kernels override check failed, one way to rectify is to include \n' ' default quantsim data type and bit-width {act_bw = %s, param_bw = %s, data_type = %s} \n ' ' in supported_kernels list under default section of target specific config file \n', quantsim_dtype_bw_info.act_bw, quantsim_dtype_bw_info.param_bw, quantsim_dtype_bw_info.data_type) raise NotImplementedError else: # user has not provided default supported_kernels, log quantsim defaults treated as default target kernel support default_valid = True logger.info(' Default supported_kernels not specified in given target specific config file. 
\n' ' Using default quantsim data type and bit-width {act_bw = %s, param_bw = %s, data_type = %s} \n ' ' as default target support\n', quantsim_dtype_bw_info.act_bw, quantsim_dtype_bw_info.param_bw, quantsim_dtype_bw_info.data_type) # in either case, validate op level override options if self._quantsim_configs[ConfigDictKeys.OP_TYPE]: op_level_valid = validate_all_op_level_dtype_bw_overrides(self._quantsim_configs[ConfigDictKeys.OP_TYPE], quantsim_dtype_bw_info) return default_valid and op_level_valid @abstractmethod def _set_default_configs(self, default_configs: DefaultsType): """ Set default configurations for op and param quantizers in model (first level of specificity in configuration file) :param default_configs: Default configurations for quantizers """ @abstractmethod def _set_param_configs(self, param_configs: ParamType): """ Set configurations for all params of specific types (second level of specificity in configuration file) :param param_configs: Dictionary containing configurations for parameters of certain types """ @abstractmethod def _set_op_type_configs(self, op_configs: OpTypeType): """ Set configurations for all ops of specific types (third level of specificity in configuration file) :param op_configs: Dictionary containing configurations for ops of certain types """ @classmethod def _build_supergroup_patterns(cls, supergroup_config: SupergroupType, callback: SupergroupConfigCallback, onnx_conn_graph_type_mapper: OnnxConnectedGraphTypeMapper) \ -> List[PatternType]: """ Create a list holding pattern types corresponding to sequences specified in the supergroup config :param supergroup_config: Quantsim wrapper configurations for supergroup ops :return: List of PatternTypes holding supergroup ops and callback for when the supergroup is found """ op_list = supergroup_config[ConfigDictKeys.OP_LIST] list_of_permutations = _build_list_of_permutations(op_list, onnx_conn_graph_type_mapper) list_of_patterns = [] for permutation in list_of_permutations: list_of_patterns.append(PatternType(pattern=permutation, action=callback)) return list_of_patterns @abstractmethod def _set_supergroup_configs(self, supergroups_configs: List[SupergroupType]): """ Set supergroup specific configurations (fourth level of specificity in configuration file) :param supergroups_configs: Configurations for supergroups """ @abstractmethod def _set_model_input_configs(self, model_input_configs: ConfigType): """ Set model input specific configurations (fifth level of specificity in configuration file) :param model_input_configs: Configuration for model inputs """ @abstractmethod def _set_model_output_configs(self, model_output_configs: ConfigType): """ Set model output specific configurations (sixth level of specificity in configuration file) :param model_output_configs: Configuration for model outputs """ def _build_list_of_permutations(op_list: List[str], onnx_conn_graph_type_mapper: OnnxConnectedGraphTypeMapper) \ -> List[List[str]]: """ Given a list of onnx op types, where each onnx op type could potentially map to multiple connected graph types, create a list of all permutations of lists of connected graph types that would satisfy the same ordering as the original onnx op type list. For example, for an onnx op type "o1" that maps to two connected graph types "c1_1" and "c1_2", and an onnx op type "o2" that maps to two connected graph types "c2_1" and "c2_2", all permutations of ["o1", "o2"] would lead to ["c1_1", "c2_1"], ["c1_1", "c2_2"], ["c1_2", "c2_1"], and ["c1_2", "c2_2"]. 
:param op_list: List of onnx op types :param onnx_conn_graph_type_mapper: Class that provides utilities for mapping onnx op types to connected graph types :return: List of permutations of connected graph op types satisfying the ordering specified by op_list onnx types """ # base case, return list of lists of connected graph ops corresponding to the only op in the list if len(op_list) == 1: permutations_of_op_list = [] conn_graph_types_of_current_op = onnx_conn_graph_type_mapper.get_conn_graph_type_from_onnx_type(op_list[0]) for op in conn_graph_types_of_current_op: permutations_of_op_list.append([op]) return permutations_of_op_list permutations_of_op_list = [] permutations_of_succeeding_ops = _build_list_of_permutations(op_list[1:], onnx_conn_graph_type_mapper) conn_graph_types_of_current_op = onnx_conn_graph_type_mapper.get_conn_graph_type_from_onnx_type(op_list[0]) for op in conn_graph_types_of_current_op: for permutation in permutations_of_succeeding_ops: new_permutation = [op] + permutation permutations_of_op_list.append(new_permutation) return permutations_of_op_list def get_setting_type(setting_name: str) -> str: """ Return a string corresponding to the type of setting that is specified by setting_name. :param setting_name: Name of the setting to change :return: String corresponding to the type of setting that is specified by setting_name. """ if setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]: return ConfigDictKeys.IS_QUANTIZED if setting_name == ConfigDictKeys.IS_SYMMETRIC: return ConfigDictKeys.IS_SYMMETRIC logger.error('Unrecognized quantizer setter name %s', setting_name) raise AssertionError def get_all_ops_in_neighborhood(op: Op, direction: str, neighborhood=None): """ Given an op and a direction, populate neighborhood dictionary with all ops adjacent to that op, and which direction they are adjacent in. If a neighboring op has other connections in the same direction as the op we began with, ops connecting to the neighboring op in those other connections will also be part of the same neighborhood. :param op: Op to find neighboring ops from :param direction: Direction to search for neighboring ops (will be 'input' or 'output') :param neighborhood: Dictionary mapping neighboring ops to the direction which they connect to op. """ if neighborhood is None: neighborhood = {} neighborhood[op] = direction if direction == 'input' and op.inputs: input_products = [inp for inp in op.inputs if inp.is_inter_module()] input_ops = [inp.producer for inp in input_products] for input_op in input_ops: if input_op not in neighborhood: neighborhood[input_op] = 'output' if input_op.type == 'Split': # Neighborhood ops include input of split, as well as all other consumers of split get_all_ops_in_neighborhood(input_op, 'input', neighborhood) get_all_ops_in_neighborhood(input_op, 'output', neighborhood) elif op.output: output_ops = [consumer for consumer in op.output.consumers] for output_op in output_ops: if output_op not in neighborhood: neighborhood[output_op] = 'input' if output_op.type == 'Split': # Neighborhood ops include all consumers of split get_all_ops_in_neighborhood(output_op, 'output', neighborhood) return neighborhood def current_config_in_supported_kernels(current_dtype_bw: QuantDtypeBwInfo, supported_kernels: List) -> bool: """ Checks if given bw/dtype config is in (act, param) in supported kernels provided. :param current_dtype_bw : current data type and bitwidths for act and param as QuantDtypeBwInfo. 
:param supported_kernels: supported kernels (Default level in config file). :return: True, if current config is part of the supported Kernels, False otherwise. """ for supported_kernel_config in supported_kernels: # retrieve one set of act/param kernel config support act_config = supported_kernel_config[ConfigDictKeys.ACTIVATION] param_config = supported_kernel_config[ConfigDictKeys.PARAM] # we need to compare combination of act/param with default user provided config. # Because a given kernel support is valid only as a combination. if act_config[ConfigDictKeys.DTYPE] == current_dtype_bw.data_type and \ act_config[ConfigDictKeys.BITWIDTH] == current_dtype_bw.act_bw and \ param_config[ConfigDictKeys.DTYPE] == current_dtype_bw.data_type and \ param_config[ConfigDictKeys.BITWIDTH] == current_dtype_bw.param_bw: return True return False def is_current_config_same_as_override_option(current_dtype_bw: QuantDtypeBwInfo, supported_kernels: List) -> bool: """ Checks if given bw/dtype config is in (act, param) is same as supported kernel provided as an option at DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX. :param current_dtype_bw : current data type and bitwidths for act and param as QuantDtypeBwInfo. :param supported_kernels: supported kernels (Default level in config file). :return: True, if current config is supported Kernel at index specified by , False otherwise. """ override_dtype_bw = get_override_from_supported_kernels(supported_kernels) # we need to compare combination of act/param with default user provided config. # Because a given kernel support is valid only as a combination. if override_dtype_bw.data_type == current_dtype_bw.data_type and \ override_dtype_bw.act_bw == current_dtype_bw.act_bw and \ override_dtype_bw.data_type == current_dtype_bw.data_type and \ override_dtype_bw.param_bw == current_dtype_bw.param_bw: return True return False def get_override_from_supported_kernels(supported_kernels: Dict) -> QuantDtypeBwInfo: """ extracts the first option from list of supported kernels configured as QuantDtypeBwInfo. :param supported_kernels: Dictionary of supported kernels at default level. :return: """ assert supported_kernels config_file_default_act_bw_dtype_config = supported_kernels[DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX][ConfigDictKeys.ACTIVATION] config_file_default_param_bw_dtype_config = supported_kernels[DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX][ConfigDictKeys.PARAM] override_data_type = config_file_default_act_bw_dtype_config[ConfigDictKeys.DTYPE] override_act_bw = config_file_default_act_bw_dtype_config[ConfigDictKeys.BITWIDTH] override_param_bw = config_file_default_param_bw_dtype_config[ConfigDictKeys.BITWIDTH] return QuantDtypeBwInfo(override_data_type, override_act_bw, override_param_bw) def is_override_dtype_bw_valid(override_dtype_bw_info: QuantDtypeBwInfo, quantsim_dtype_bw_info: QuantDtypeBwInfo) -> bool: """ check if override dtype bw is valid given quantsim default dtype and bw. :param override_dtype_bw_info: override data type, bitwidth info as QuantDtypeBwInfo. :param quantsim_dtype_bw_info: quantsim default data type, bitwidth info as QuantDtypeBwInfo. :return: bool, True if override option is valid, False otherwise. """ # Rule : When an Op does NOT have lower precision kernel support, supported_kernels based override can be applied => # quantsim default dtype/bw should be lower precision compared to override. # case (i) if both are int or both are float dtype, compare bitwidths. 
# ex : {quantsim default = int16, override = int8} or {quantsim default = int8, override = int4} are not supported # case (ii) if quantsim default is float => override is not float, then it fails to satisfy criteria because: # quantsim defaults are higher precision compared to overrides . (ex : quantsim default = Fp16 > override = int) if (quantsim_dtype_bw_info.data_type == override_dtype_bw_info.data_type and (quantsim_dtype_bw_info.act_bw > override_dtype_bw_info.act_bw or quantsim_dtype_bw_info.param_bw > override_dtype_bw_info.param_bw)) or \ quantsim_dtype_bw_info.data_type == QuantizationDataType.float: logger.error(' Target specfic op level override only with a higher precision kernel is supported \n,' ' (please check both quantsim defaults and default supported_kernels in config file specified at override index {%s}) \n' ' quantsim is configured with {act_bw = %s, param_bw = %s, data_type = %s} and \n' ' supported_kernels override configured as {act_bw = %s, param_bw = %s, data_type = %s} \n', DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX, quantsim_dtype_bw_info.act_bw, quantsim_dtype_bw_info.param_bw, quantsim_dtype_bw_info.data_type, override_dtype_bw_info.act_bw, override_dtype_bw_info.param_bw, override_dtype_bw_info.data_type) return False return True def validate_all_op_level_dtype_bw_overrides(op_configs: OpTypeType, default_dtype_bw: QuantDtypeBwInfo): """ Checks if given op level supported_kernel is supported (across all op types). :param op_configs: Op level config information (Level 3 spec in target config file). :param default_dtype_bw: default values configured for quantsim data_type/ bitwidths. :return: bool, indicating valid or not. """ for op_name, op_config in op_configs.items(): if ConfigDictKeys.SUPPORTED_KERNELS in op_config: op_level_supported_kernels = op_config[ConfigDictKeys.SUPPORTED_KERNELS] # if current quantsim config or default level supported kernel is in op level supported kernels # no override required at op level. if current_config_in_supported_kernels(default_dtype_bw, op_level_supported_kernels): logger.info(" Default option found in op level supported kernels list, skip " "op level override needed for op {%s} \n", op_name) else: # If there are multiple options - we always override with DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX # in supported_kernels, check if the override option dtype and bitwidth is valid. # option specified at DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX of default supported_kernels # will be applied during override. 
override_dtype_bw_info = get_override_from_supported_kernels(op_level_supported_kernels) if not is_override_dtype_bw_valid(override_dtype_bw_info, default_dtype_bw): logger.info(' Op level supported_kernels override check failed for op {%s} \n' ' Op level override only with higher precision kernel is supported \n' ' (please check both quantsim defaults and default supported_kernels in config file specified at override index {%s})\n' ' One way to rectify this is to specify lower precision data type and bit-width as defaults ' ' \n ex : {act_bw = %s, param_bw = %s, data_type = %s} and' ' use op level supported_kernels override \n' ' for this op to indicate higher precision kernel that is supported on given target \n' ' ex: { act_bw = %s, param_bw = %s , data_type = %s} \n', op_name, DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX, override_dtype_bw_info.act_bw, override_dtype_bw_info.param_bw, override_dtype_bw_info.data_type, default_dtype_bw.act_bw, default_dtype_bw.param_bw, default_dtype_bw.data_type) raise NotImplementedError return True
py
1a3aed518340dc54cec3ab3b88404381b20fe6af
#!/usr/bin/env python # # Generate graphs of the results given the data generated by the experiment scripts. # Requires numpy and matplotlib import numpy as np import matplotlib.pyplot as plt ## Startup time and memory usage ## native_data = np.genfromtxt('ping-server-native.csv', delimiter=',', skip_header=1) native_time = native_data[:,0] native_mem = native_data[:, 1] jvm_data = np.genfromtxt('ping-server-jvm.csv', delimiter=',', skip_header=1) jvm_time = jvm_data[:,0] jvm_mem = jvm_data[:, 1] plt.plot(native_time, label='Native Image') plt.plot(jvm_time, label='JVM') plt.ylabel('Startup time (ms)') plt.xlabel('Run') plt.title("Time from Start to first HTTP Response") plt.legend() plt.savefig('startup-time.png') plt.show() plt.plot(native_mem, label='Native Image') plt.plot(jvm_mem, label='JVM') plt.ylabel('Memory usage (KB)') plt.xlabel('Run') plt.title("Resident Set Size after first HTTP Response") plt.legend() plt.savefig('resident-set-size.png') plt.show() ## Response times under sustained load ## native_image_sustained = np.genfromtxt('native-image-sustained.csv', delimiter=',', skip_header=1) jvm_cold_sustained = np.genfromtxt('jvm-cold-sustained.csv', delimiter=',', skip_header=1) jvm_warm_sustained = np.genfromtxt('jvm-warm-sustained-8.csv', delimiter=',', skip_header=1) plt.plot(native_image_sustained[0:99, 0], native_image_sustained[0:99,1], label="Native Image") plt.plot(jvm_cold_sustained[0:99,0], jvm_cold_sustained[0:99,1], label="JVM (Cold)") plt.plot(jvm_warm_sustained[0:99,0], jvm_warm_sustained[0:99,1], label="JVM (Warm)") plt.ylabel("Response time (ms)") plt.xlabel("Percentile") plt.title("0-99 Percentile response times over 10000 requests") plt.legend() plt.savefig('lower-sustained-response-time.png') plt.show() plt.plot(native_image_sustained[95:100, 0], native_image_sustained[95:100,1], label="Native Image") plt.plot(jvm_cold_sustained[95:100,0], jvm_cold_sustained[95:100,1], label="JVM (Cold)") plt.plot(jvm_warm_sustained[95:100,0], jvm_warm_sustained[95:100,1], label="JVM (Warm)") plt.ylabel("Response time (ms)") plt.xlabel("Percentile") plt.title("95-100 Percentile response times over 10000 requests") plt.legend() plt.savefig('upper-sustained-response-time.png') plt.show() ## JVM Response time over time jvm_warm_sustained_1 = np.genfromtxt('jvm-warm-sustained-1.csv', delimiter=',', skip_header=1) jvm_warm_sustained_2 = np.genfromtxt('jvm-warm-sustained-2.csv', delimiter=',', skip_header=1) jvm_warm_sustained_3 = np.genfromtxt('jvm-warm-sustained-3.csv', delimiter=',', skip_header=1) jvm_warm_sustained_4 = np.genfromtxt('jvm-warm-sustained-4.csv', delimiter=',', skip_header=1) jvm_warm_sustained_5 = np.genfromtxt('jvm-warm-sustained-5.csv', delimiter=',', skip_header=1) jvm_warm_sustained_6 = np.genfromtxt('jvm-warm-sustained-6.csv', delimiter=',', skip_header=1) jvm_warm_sustained_7 = np.genfromtxt('jvm-warm-sustained-7.csv', delimiter=',', skip_header=1) jvm_warm_sustained_8 = np.genfromtxt('jvm-warm-sustained-8.csv', delimiter=',', skip_header=1) plt.plot(jvm_warm_sustained_1[0:95,0], jvm_warm_sustained_1[0:95,1], label="After 10,000 requests") plt.plot(jvm_warm_sustained_2[0:95,0], jvm_warm_sustained_2[0:95,1], label="After 20,000 requests") plt.plot(jvm_warm_sustained_3[0:95,0], jvm_warm_sustained_3[0:95,1], label="After 30,000 requests") plt.plot(jvm_warm_sustained_4[0:95,0], jvm_warm_sustained_4[0:95,1], label="After 40,000 requests") plt.plot(jvm_warm_sustained_5[0:95,0], jvm_warm_sustained_5[0:95,1], label="After 50,000 requests") 
plt.plot(jvm_warm_sustained_6[0:95,0], jvm_warm_sustained_6[0:95,1], label="After 60,000 requests") plt.plot(jvm_warm_sustained_7[0:95,0], jvm_warm_sustained_7[0:95,1], label="After 70,000 requests") plt.plot(jvm_warm_sustained_8[0:95,0], jvm_warm_sustained_8[0:95,1], label="After 80,000 requests") plt.ylabel("Response time (ms)") plt.xlabel("Percentile") plt.title("JVM 0-95 Percentile response times over time") plt.legend() plt.savefig('warming-sustained-response-time.png') plt.show() ## Response times under sustained load for various JVM settings jvm_untuned = np.genfromtxt('jvm-untuned.csv', delimiter=',', skip_header=1) jvm_gc_tuned = np.genfromtxt('jvm-gc-tuned.csv', delimiter=',', skip_header=1) jvm_compiler_tuned = np.genfromtxt('jvm-compiler-tuned.csv', delimiter=',', skip_header=1) jvm_both_tuned = np.genfromtxt('jvm-both-tuned.csv', delimiter=',', skip_header=1) plt.plot(jvm_untuned[0:99, 0], jvm_untuned[0:99,1], label="Untuned JVM") plt.plot(jvm_gc_tuned[0:99,0], jvm_gc_tuned[0:99,1], label="GC tuned JVM") plt.plot(jvm_compiler_tuned[0:99,0], jvm_compiler_tuned[0:99,1], label="Compiler tuned JVM") plt.plot(jvm_both_tuned[0:99,0], jvm_both_tuned[0:99,1], label="Both tuned JVM") plt.ylabel("Response time (ms)") plt.xlabel("Percentile") plt.title("0-99 Percentile response times over 10000 requests") plt.legend() plt.savefig('lower-tuned-response-time.png') plt.show() plt.plot(jvm_untuned[95:100, 0], jvm_untuned[95:100,1], label="Untuned JVM") plt.plot(jvm_gc_tuned[95:100,0], jvm_gc_tuned[95:100,1], label="GC tuned JVM") plt.plot(jvm_compiler_tuned[95:100,0], jvm_compiler_tuned[95:100,1], label="Compiler tuned JVM") plt.plot(jvm_both_tuned[95:100,0], jvm_both_tuned[95:100,1], label="Both tuned JVM") plt.ylabel("Response time (ms)") plt.xlabel("Percentile") plt.title("95-100 Percentile response times over 10000 requests") plt.legend() plt.savefig('upper-tuned-response-time.png') plt.show()
py
1a3aedf299618f3134649c4f1a148cc289ff8f76
#!/usr/bin/python3 import sys import copy from pathlib import Path import numpy as np import pandas as pd import matplotlib.pyplot as plt from statsmodels.tsa.stattools import adfuller from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.statespace.sarimax import SARIMAX,SARIMAXResults,SARIMAXParams from statsmodels.tsa.statespace.mlemodel import MLEResults from predictor.utility import msg2log from clustgelDL.auxcfg import D_LOGS,listLogSet, closeLogs,log2All,exec_time,logList from offlinepred.api import logMatrix, plotPredictDF def readDataset(csv_file:str="", endogen:list=None, title:str="Time Series",f:object=None)->pd.DataFrame: my_csv=Path(csv_file) if my_csv.is_file(): df1=pd.read_csv(csv_file) if endogen is not None and len(endogen)>0: plot = df1.plot(y=endogen,figsize=(14,8),legend=True,title=title) title1=title.replace(' ','_') file_png=Path(D_LOGS['plot'] / Path(title1)).with_suffix('.png') fig=plot.get_figure() fig.savefig(str(file_png)) else: df1=None return df1 def checkStationarity(df:pd.DataFrame=None, data_col_name:str=None,title:str="Time Series",f:object=None): if df is None or data_col_name is None: return series=df[data_col_name].values # ADF Test result = adfuller(series, autolag='AIC') msg2log(None, f'ADF Statistic: {result[0]}',f) msg2log(None, f'n_lags: {result[1]}',f) msg2log(None,f'p-value: {result[1]}',f) for key, value in result[4].items(): msg2log(None,'Critial Values:',f) msg2log(None,f' {key}, {value}',f) plt.rcParams.update({'figure.figsize':(9,7), 'figure.dpi':120}) # Original Series fig, axes = plt.subplots(3, 3, sharex=True) axes[0, 0].plot(df[data_col_name]); axes[0, 0].set_title('Original Series') plot_acf(df[data_col_name], ax=axes[0, 1]) # 1st Differencing axes[1, 0].plot(df[data_col_name].diff()); axes[1, 0].set_title('1st Order Differencing') plot_acf(df[data_col_name].diff().dropna(), ax=axes[1, 1]) # 2nd Differencing axes[2, 0].plot(df[data_col_name].diff().diff()); axes[2, 0].set_title('2nd Order Differencing') plot_acf(df[data_col_name].diff().diff().dropna(), ax=axes[2, 1]) title1 = "{}_{}".format(title.replace(' ', '_'),data_col_name) file_png = Path(D_LOGS['plot'] / Path(title1)).with_suffix('.png') plt.savefig(file_png) plt.close("all") return def arima_order(df:pd.DataFrame=None, data_col_name:str=None,training_size:int=512,title:str="Time Series", max_order:tuple=(2,2,2), max_seasonal_order:tuple=(1,0,1,6), f:object=None)->((int,int,int),(int,int,int,int)): n = len(df[data_col_name]) start_index=0 if n>training_size: start_index=n-training_size (P,D,Q)=max_order (SP,SD,SQ,S)=max_seasonal_order opt_order=(0,0,0) opt_aic=1e+12 for p in range(P+1): for d in range(D+1): for q in range(Q+1): order=(p,d,q) seasonal_order = (0, 0, 0, 0) errmsg="" try: model = SARIMAX(df[data_col_name][start_index:], order=order, seasonal_order=seasonal_order) model_fit = model.fit(disp=0) except: errmsg = f""" SARIMA optimal order searching ({p},{d},{q})X(0,0,0) Oops!! Unexpected error... 
Error : {sys.exc_info()[0]} """ finally: if len(errmsg) > 0: msg2log(None, errmsg, D_LOGS['except']) break if model_fit.aic<opt_aic: opt_aic=model_fit.aic opt_order=(p,d,q) msg2log(None,"ARIMA({},{},{}): AIC={}".format(p,d,q,model_fit.aic)) opt_seasonal_order = (0, 0, 0, S) opt_seasonal_aic = 1e+12 opt_sarima_aic = opt_aic+1.0 if S>0: opt_seasonal_order = (0, 0, 0,S) opt_seasonal_aic = 1e+12 for sp in range(SP+1): for sd in range(SD+1): for sq in range(SQ+1): seasonal_order=(sp,sd,sq,S) order=(0,0,0) errmsg="" try: model = SARIMAX(df[data_col_name][start_index:], order=order, seasonal_order=seasonal_order) model_fit = model.fit(disp=0) except: errmsg = f""" SARIMA optimal order searching (0,0,0)X({sp},{sd},{sq}):{S} Oops!! Unexpected error... Error : {sys.exc_info()[0]} """ finally: if len(errmsg)>0: msg2log(None,errmsg,D_LOGS['except']) break if model_fit.aic < opt_seasonal_aic: opt_seasonal_aic = model_fit.aic opt_seasonal_order = (sp, sd, sq,S) msg2log(None, "ARIMA(0,0,0)x({},{},{},{}): AIC={}".format(sp, sd, sq, S, model_fit.aic)) seasonal_order = (0, 0, 0, 0) opt_sarima_aic=1e+12 model = SARIMAX(df[data_col_name][start_index:], order=opt_order, seasonal_order=opt_seasonal_order) model_fit = model.fit(disp=0) opt_sarima_aic=model_fit.aic message=f"""SARIMA models comparison SARIMA({opt_order})x(0,0,0,0) : AIC={opt_aic} SARIMA(0,0,0)x({opt_seasonal_order}) : AIC={opt_seasonal_aic} SARIMA({opt_order})x({opt_seasonal_order}) : AIC={opt_sarima_aic} """ msg2log(None,message,f) if opt_aic<opt_seasonal_aic and opt_aic<opt_sarima_aic: order=opt_order seasonal_order=(0,0,0,0) elif opt_seasonal_aic<opt_aic and opt_seasonal_aic<opt_sarima_aic: opder=(0,0,0) seasonal_order=opt_seasonal_order elif opt_sarima_aic<opt_aic and opt_sarima_aic < opt_seasonal_aic: order=opt_order seasonal_order=opt_seasonal_order return order,seasonal_order def arima_run(df:pd.DataFrame=None, data_col_name:str=None,dt_col_name:str="Date Time",chunk_offset:int=0, chunk_size:int=8192, in_sample_start:int=0,in_sample_size:int=512, forecast_period:int=4, title:str="Time Series", order:tuple=(1,0,0),seasonal_order:tuple=(0,0,0,0), f:object=None): pass n=len(df[data_col_name]) if chunk_size+chunk_offset>n: chunk_size=n-chunk_offset cho=chunk_offset tcho=df[dt_col_name][chunk_offset] chs=chunk_size tchs=df[dt_col_name][chunk_offset+chunk_size-1] if in_sample_start + in_sample_size>n: in_sample_size=n-in_sample_start iss=in_sample_start tiss=df[dt_col_name][iss] isl=in_sample_start+in_sample_size-1 tisl=df[dt_col_name][isl] message=f"""{data_col_name} TS length: {n} The chunk of TS for ARIMA estimation: start offset : {cho} timestamp: {tcho} chunk size : {chs} last timesamp: {tchs} In-sample predict from index: {iss}, timestamp {tiss} till index: {isl}, last timestamp {tisl} ARIMA order: p = {order[0]} d = {order[1]} q = {order[2]} SARIMA order: P = {seasonal_order[0]} D = {seasonal_order[1]} Q ={seasonal_order[2]} Seasonal Period = {seasonal_order[3]} """ msg2log(None,message,f) msg2log(None,message,D_LOGS['main']) log2All() model = SARIMAX(df[data_col_name][chunk_offset:], order=order, seasonal_order=seasonal_order) model_fit = model.fit(disp=0) msg2log(None,model_fit.summary(),D_LOGS['predict']) msg2log(None, model_fit.param_names, D_LOGS['predict']) y_pred_series = model_fit.predict(start=in_sample_start, end=in_sample_start + in_sample_size-1) y_pred=np.array(y_pred_series) y=np.array(df[data_col_name][in_sample_start:in_sample_start+in_sample_size]) err = np.round(np.subtract(y, y_pred),decimals=4) 
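    # Pack indices, observations, in-sample predictions and errors into a matrix and dict for logging and plotting.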
X,predict_dict=predict_bundle(y = y, y_pred = y_pred, err = err, start_index=in_sample_start,f = f) title = "{:^60s}\n{:^10s} {:^10s} {:^10s} {:^10s} {:^10s} {:^10s}".format(data_col_name, "NN","Index","Obs Value", "Predict","Error","Abs.Err") logMatrix(X, title= title, wideformat = '10.4f', f=D_LOGS['predict']) plotPredict(predict_dict=predict_dict, data_col_name=data_col_name, title= "in_sample_predict", f=D_LOGS['predict']) forecast_arr_series=model_fit.forecast(steps=forecast_period) forecast_arr=np.array(forecast_arr_series) X1,forecast_dict = predict_bundle(y=forecast_arr, y_pred=None, err=None, start_index=n, f=f) title = "{:^60s}\n{:^14s} {:^14s} {:^14s} ".format(data_col_name, "NN", "Index", "Forecast") logMatrix(X1, title=title, wideformat='14.8f', f=D_LOGS['predict']) plotPredict(predict_dict=forecast_dict, data_col_name=data_col_name, title="forecasting", f=D_LOGS['predict']) return def predict_bundle(y:np.array=None, y_pred:np.array=None, err:np.array=None,start_index:int=0,f:object=None)->(np.array, dict): predict_dict={} (n,) = y.shape z = np.array([i for i in range(start_index, start_index + n)]) predict_dict["ind"] = copy.copy(z) if y_pred is None: predict_dict["forecast"]=y else: predict_dict["observation"]=y predict_dict["in_sample_predict"]=y_pred y1 = y.reshape((n, 1)) err_abs = None pred_err_abs = None y2=None if err is not None: abserr=np.round(np.absolute(err),decimals=4) predict_dict["error"]=copy.copy(err) predict_dict["abserror"] = copy.copy(abserr) err=err.reshape((n,1)) abserr=abserr.reshape((n,1)) err_abs=np.append(err,abserr,axis=1) if y_pred is not None and err_abs is not None: y2=y_pred.reshape((n,1)) pred_err_abs = np.append(y2, err_abs, axis=1) elif y_pred is not None and err_abs is None: pred_err_abs = y_pred.reshape((n,1)) if pred_err_abs is not None: y_pred_err_abs = np.append(y1, pred_err_abs, axis=1) else: y_pred_err_abs=y1 z = np.array([i for i in range(start_index,start_index+n)]) predict_dict["ind"] = copy.copy(z) z = z.reshape((n, 1)) X = np.append(z, y_pred_err_abs, axis=1) return X, predict_dict def plotPredict(predict_dict:dict=None, data_col_name:str="",title:str="in_sample_predict",f:object=None): df = pd.DataFrame(predict_dict) sFolder = Path(D_LOGS['plot'] / Path(data_col_name) / Path(title)) sFolder.mkdir(parents=True, exist_ok=True) title1 = "{}_{}".format(title, data_col_name) test_predictions_file = Path(sFolder / Path(title1)).with_suffix('.csv') df.to_csv(test_predictions_file, index=False) msg = "{} test sequence predict by {} ARIMA model saved in \n{}\n".format(data_col_name, title, test_predictions_file) msg2log(None, msg, D_LOGS['predict']) plotPredictDF(test_predictions_file, data_col_name, title=title1) return
py
1a3aedf4b1007195923c00d3b0ccf9e0d12719f2
from typing import List


class Solution:
    def threeConsecutiveOdds(self, arr: List[int]) -> bool:
        # Track the length of the current run of consecutive odd numbers.
        consecutive_odds = 0
        for n in arr:
            if n % 2 == 1:
                consecutive_odds += 1
                if consecutive_odds == 3:
                    return True
            else:
                consecutive_odds = 0
        return False
py
1a3aee69a56c86c0fff7fbbf69c0465e8f8e6605
# -*- coding: utf-8 -*-
import cv2
import matplotlib.pyplot as lt
import sys

img = cv2.imread('C:/Users/Victor Pacheco Garci/Desktop/UPC/2017-18/GDSA/Team5/Farmacia Albinyana/farmacia_albinyana_101.jpg', 1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT()
# Finds the keypoints and computes their descriptors in a single call.
# kp will be a list of keypoints; des is a numpy array of (number of keypoints) x 128.
kp, des = sift.detectAndCompute(gray, None)
lt.imshow(cv2.drawKeypoints(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), kp))
lt.show()
py
1a3aee78e15a3fea30027273ff05c2302f68c928
import datetime from django.test import TestCase from django.utils import timezone from django.urls import reverse from .models import Question # Create your tests here. def create_question(question_text, days): """ Create a question with the given `question_text` and published the given number of `days` offset to now (negative for questions published in the past, positive for questions that have yet to be published). """ time = timezone.now() + datetime.timedelta(days=days) query = Question.objects.create(question_text=question_text, pub_date=time) return query # ============== VIEWS ============== class QuestionIndexViewTests(TestCase): url = reverse('polls:index') def test_no_questions(self): """ If no questions exist, an appropriate message is displayed. """ response = self.client.get(self.url) self.assertEqual(response.status_code, 200) self.assertContains(response, "No polls are available.") self.assertQuerysetEqual(response.context['latest_question_list'], []) def test_past_question(self): """ Questions with a pub_date in the past are displayed on the index page. """ create_question(question_text="Past question.", days=-30) response = self.client.get(self.url) self.assertQuerysetEqual( response.context['latest_question_list'], [ '<Question: Past question.>'] ) def test_future_question(self): """ Questions with a pub_date in the future aren't displayed on the index page. """ create_question(question_text="Future question.", days=30) response = self.client.get(self.url) self.assertContains(response, "No polls are available") self.assertQuerysetEqual(response.context['latest_question_list'], []) def test_future_question_and_past_question(self): """ Even if both past and future questions exist, only past questions are displayed. """ create_question(question_text="Future question.", days=30) create_question(question_text="Past question.", days=-30) response = self.client.get(self.url) self.assertQuerysetEqual( response.context['latest_question_list'], [ '<Question: Past question.>'] ) def test_two_past_questions(self): """ The questions index page may display multiple questions. """ create_question(question_text="Past question 1.", days=-30) create_question(question_text="Past question 2.", days=-5) response = self.client.get(self.url) self.assertQuerysetEqual( response.context['latest_question_list'], ['<Question: Past question 2.>', '<Question: Past question 1.>'] ) class QuestionDetailViewTests(TestCase): def test_future_question(self): """ The detail view of a question with a pub_date in the future returns a 404 not found. """ future_question = create_question( question_text='Future question.', days=5) url = reverse('polls:detail', args=(future_question.id,)) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_past_question(self): """ The detail view of a question with a pub_date in the past displays the question's text. """ past_question = create_question( question_text='Past Question.', days=-5) url = reverse('polls:detail', args=(past_question.id,)) response = self.client.get(url) self.assertContains(response, past_question.question_text) # ============== MODELS ============== class QuestionModelTests(TestCase): def test_was_published_recently_with_future_question(self): """ was_published_recently() returns False for questions whose pub_date is in the future. 
""" time = timezone.now() + datetime.timedelta(days=30) future_question = Question(pub_date=time) self.assertIs(future_question.was_published_recently(), False) def test_was_published_recently_with_old_question(self): """ was_published_recently() returns False for questions whose pub_date is older than 1 day. """ time = timezone.now() - datetime.timedelta(days=1, seconds=1) old_question = Question(pub_date=time) self.assertIs(old_question.was_published_recently(), False) def test_was_published_recently_with_recent_question(self): """ was_published_recently() returns True for questions whose pub_date is within the last day. """ time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59) recent_question = Question(pub_date=time) self.assertIs(recent_question.was_published_recently(), True)
py
1a3af0fcf9ee82ab34598a3efe23366c09b2d664
# Source: https://gist.githubusercontent.com/rogerallen/1583593/raw/0fffdee6149ab1d993dffa51b1fa9aa466704e18/us_state_abbrev.py
# United States of America Python Dictionary to translate States,
# Districts & Territories to Two-Letter codes and vice versa.
#
# https://gist.github.com/rogerallen/1583593
#
# Dedicated to the public domain. To the extent possible under law,
# Roger Allen has waived all copyright and related or neighboring
# rights to this code.

US_STATE_ABBREV = {
    "Alabama": "AL", "Alaska": "AK", "American Samoa": "AS", "Arizona": "AZ",
    "Arkansas": "AR", "California": "CA", "Colorado": "CO", "Connecticut": "CT",
    "Delaware": "DE", "District of Columbia": "DC", "Florida": "FL", "Georgia": "GA",
    "Guam": "GU", "Hawaii": "HI", "Idaho": "ID", "Illinois": "IL",
    "Indiana": "IN", "Iowa": "IA", "Kansas": "KS", "Kentucky": "KY",
    "Louisiana": "LA", "Maine": "ME", "Maryland": "MD", "Massachusetts": "MA",
    "Michigan": "MI", "Minnesota": "MN", "Mississippi": "MS", "Missouri": "MO",
    "Montana": "MT", "Nebraska": "NE", "Nevada": "NV", "New Hampshire": "NH",
    "New Jersey": "NJ", "New Mexico": "NM", "New York": "NY", "North Carolina": "NC",
    "North Dakota": "ND", "Northern Mariana Islands": "MP", "Ohio": "OH", "Oklahoma": "OK",
    "Oregon": "OR", "Pennsylvania": "PA", "Puerto Rico": "PR", "Rhode Island": "RI",
    "South Carolina": "SC", "South Dakota": "SD", "Tennessee": "TN", "Texas": "TX",
    "Utah": "UT", "Vermont": "VT", "Virgin Islands": "VI", "Virginia": "VA",
    "Washington": "WA", "West Virginia": "WV", "Wisconsin": "WI", "Wyoming": "WY",
}

STATES_50 = {
    "Alabama": "AL", "Alaska": "AK", "Arizona": "AZ", "Arkansas": "AR",
    "California": "CA", "Colorado": "CO", "Connecticut": "CT", "Delaware": "DE",
    "District of Columbia": "DC", "Florida": "FL", "Georgia": "GA", "Hawaii": "HI",
    "Idaho": "ID", "Illinois": "IL", "Indiana": "IN", "Iowa": "IA",
    "Kansas": "KS", "Kentucky": "KY", "Louisiana": "LA", "Maine": "ME",
    "Maryland": "MD", "Massachusetts": "MA", "Michigan": "MI", "Minnesota": "MN",
    "Mississippi": "MS", "Missouri": "MO", "Montana": "MT", "Nebraska": "NE",
    "Nevada": "NV", "New Hampshire": "NH", "New Jersey": "NJ", "New Mexico": "NM",
    "New York": "NY", "North Carolina": "NC", "North Dakota": "ND", "Ohio": "OH",
    "Oklahoma": "OK", "Oregon": "OR", "Pennsylvania": "PA", "Rhode Island": "RI",
    "South Carolina": "SC", "South Dakota": "SD", "Tennessee": "TN", "Texas": "TX",
    "Utah": "UT", "Vermont": "VT", "Virginia": "VA", "Washington": "WA",
    "West Virginia": "WV", "Wisconsin": "WI", "Wyoming": "WY",
}

us_fips = {
    "Alabama": "01", "Alaska": "02", "Arizona": "04", "Arkansas": "05",
    "California": "06", "Colorado": "08", "Connecticut": "09", "Delaware": "10",
    "District of Columbia": "11", "Florida": "12", "Georgia": "13", "Hawaii": "15",
    "Idaho": "16", "Illinois": "17", "Indiana": "18", "Iowa": "19",
    "Kansas": "20", "Kentucky": "21", "Louisiana": "22", "Maine": "23",
    "Maryland": "24", "Massachusetts": "25", "Michigan": "26", "Minnesota": "27",
    "Mississippi": "28", "Missouri": "29", "Montana": "30", "Nebraska": "31",
    "Nevada": "32", "New Hampshire": "33", "New Jersey": "34", "New Mexico": "35",
    "New York": "36", "North Carolina": "37", "North Dakota": "38", "Ohio": "39",
    "Oklahoma": "40", "Oregon": "41", "Pennsylvania": "42", "Rhode Island": "44",
    "South Carolina": "45", "South Dakota": "46", "Tennessee": "47", "Texas": "48",
    "Utah": "49", "Vermont": "50", "Virginia": "51", "Washington": "53",
    "West Virginia": "54", "Wisconsin": "55", "Wyoming": "56", "American Samoa": "60",
    "Guam": "66", "Northern Mariana Islands": "69", "Puerto Rico": "72", "Virgin Islands": "78",
}

ABBREV_US_FIPS = {US_STATE_ABBREV[state]: fips for state, fips in us_fips.items()}

# thank you to @kinghelix and @trevormarburger for this idea
abbrev_us_state = dict(map(reversed, US_STATE_ABBREV.items()))

# Simple test examples
if __name__ == "__main__":
    print("Wisconsin --> WI?", US_STATE_ABBREV["Wisconsin"] == "WI")
    print("WI --> Wisconsin?", abbrev_us_state["WI"] == "Wisconsin")
    print(
        "Number of entries (50 states, DC, 5 Territories) == 56? ",
        56 == len(US_STATE_ABBREV),
    )
py
1a3af157396067c736311d4689c008545fa267aa
""" StackedMail model """ from django.db import models class StackedMailEntry(models.Model): """ Mail to be sent """ sender = models.EmailField() receiver = models.EmailField() title = models.TextField(default='') content = models.TextField(default='') created_at = models.DateTimeField(auto_now_add=True) sent_at = models.DateTimeField(auto_now_add=True) is_sent = models.BooleanField(default=False) def __unicode__(self): return u'Title: {0}'.format(self.title) def get_email_as_data(self): """ Returns data to feed send_mass_mail """ data = [] data.append(self.title) data.append(self.content) data.append(self.sender) data.append([self.receiver]) return data
py
1a3af319001d835a07e306558aec8de0b6ad9e39
# Copyright (c) OpenMMLab. All rights reserved.

import mmcv

from .version import __version__, short_version


def digit_version(version_str):
    digit_version = []
    for x in version_str.split('.'):
        if x.isdigit():
            digit_version.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            digit_version.append(int(patch_version[0]) - 1)
            digit_version.append(int(patch_version[1]))
    return digit_version


mmcv_minimum_version = '1.3.15'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)


assert (mmcv_version >= digit_version(mmcv_minimum_version)
        and mmcv_version <= digit_version(mmcv_maximum_version)), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'

__all__ = ['__version__', 'short_version']
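
# --- Added illustration (not part of the original module) ---
# Sketch of how `digit_version` normalizes release-candidate strings so that
# plain list comparison orders versions correctly: an 'rc' pre-release sorts
# below the final release of the same version. The example values below are
# derived only from reading the function above.
if __name__ == '__main__':
    assert digit_version('1.3.15') == [1, 3, 15]
    assert digit_version('1.4.0rc1') == [1, 4, -1, 1]   # '0rc1' -> (0 - 1), 1
    assert digit_version('1.4.0rc1') < digit_version('1.4.0')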
py
1a3af33ab6d5ea6f1de62d595e3f74f02867ab6e
# coding: utf-8

from .base import BookwormService
py
1a3af3585ab5d8dd6f5a5b960d4cb946ba433c02
"""JSON input interface.""" import gzip import json from pathlib import Path from typing import Optional, TextIO, Union from ..music import Music def load_json( path: Union[str, Path, TextIO], compressed: Optional[bool] = None ) -> Music: """Load a JSON file into a Music object. Parameters ---------- path : str, Path or TextIO Path to the file or the file to load. compressed : bool, optional Whether the file is a compressed JSON file (`.json.gz`). Has no effect when `path` is a file object. Defaults to infer from the extension (`.gz`). Returns ------- :class:`muspy.Music` Loaded Music object. Notes ----- When a path is given, assume UTF-8 encoding and gzip compression if `compressed=True`. """ if isinstance(path, (str, Path)): if compressed is None: if str(path).lower().endswith(".gz"): compressed = True else: compressed = False if compressed: with gzip.open(path, "rt", encoding="utf-8") as f: return Music.from_dict(json.load(f)) with open(path, encoding="utf-8") as f: return Music.from_dict(json.load(f)) return Music.from_dict(json.load(path))
py
1a3af3fdbc281db07b23f3b3c0887bc7c4941ff9
"""Draw the histgram of the pose distributions Run it like this: `python3 -m experimental.distribution.py` Do not forget to set the dataset file path. """ import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np from dataset import get_parsed_dataset from experimental.pose_estimator import PoseEstimator if __name__ == "__main__": ds = get_parsed_dataset("data/helen.record", 1, False) # Counters n_faces = 0 pitches = [] yaws = [] rolls = [] for image, marks in ds: # image = (image.numpy()[0]*255).astype(np.uint8) height, width = image.shape[1:3] pose_estimator = PoseEstimator(img_size=(height, width)) marks = np.reshape(marks, (-1, 2))*width pose = pose_estimator.solve_pose_by_68_points(marks) # Solve the pitch, yaw and roll angels. r_mat, _ = cv2.Rodrigues(pose[0]) p_mat = np.hstack((r_mat, np.array([[0], [0], [0]]))) _, _, _, _, _, _, u_angle = cv2.decomposeProjectionMatrix(p_mat) pitch, yaw, roll = u_angle.flatten() # I do not know why the roll axis seems flipted 180 degree. Manually by pass # this issue. if roll > 0: roll = 180-roll elif roll < 0: roll = -(180 + roll) pitches.append(pitch) yaws.append(yaw) rolls.append(roll) n_faces += 1 # print("pitch: {:.2f}, yaw: {:.2f}, roll: {:.2f}".format( # pitch, yaw, roll)) # for mark in marks: # cv2.circle(image, tuple(mark), 1, (0, 255, 0), 1) # cv2.imshow("image", image) # if cv2.waitKey() == 27: # break fig, ax = plt.subplots(3, 1) ax[0].hist(pitches, 40, (-60, 60), density=True) ax[1].hist(yaws, 40, (-60, 60), density=True) ax[2].hist(rolls, 40, (-60, 60), density=True) plt.show() print(n_faces)
py
1a3af53d058bf16ce0080e779f4c5ab2e94089d6
""" Exercício 101 def voto(a): from datetime import date hoje = date.today().year idade = hoje - a if idade < 16: return f'Com {idade} anos: NÃO VOTA!' elif idade > 18 and idade < 65: return f'Com {idade} anos: VOTO OBRIGATÓRIO!' else: return f'Com {idade} anos: VOTO OPCIONAL' born = int(input('Digite a data de nascimento: ')) votacao = voto(born) print(votacao) """ """ Exercício 102 def fatorial(a, show=False): ''' -> Calcula o Fatorial de um número :param a: o número a ser calculado :param show: (opcional) Mostrar ou não o cálculo :return: O valor fatorial do número informado. ''' fact = 1 for c in range(a, 0, -1): if show: print(c, end='') if c> 1: print(' x ', end='') else: print(' = ', end='') fact *= c return fact fator = 18 p = fatorial(fator, show=True) print(p) help(fatorial) """ """ Exercício 103 def ficha(a='<desconhecido>', b=0): print(f'O jogador {a} fez {b} gol(s) no campeonato!') nome = str(input('Nome do jogador: ')) gols = str(input('Quantidade de gols: ')) if gols.isnumeric(): gols = int(gols) else: gols = 0 if nome.strip() == '': ficha(b=gols) else: ficha(nome, gols) """ """ Exercício 104 """ def leiaInt(ite): while True: try: valor = int(input(ite)) except (ValueError, TypeError): print('\033[0;31mERRO! Tivemos um problema com o tipo de dado que você digitou!\033[m') continue except KeyboardInterrupt: print('\033[0;31mERRO! O usuário preferiu não informar os dados!\033[m') return 0 else: return valor def leiaFloat(fro): while True: try: valor = float(input(fro)) except (ValueError, TypeError): print('\033[0;31mERRO! Tivemos um problema com o tipo de dado que você digitou!\033[m') continue except KeyboardInterrupt: print('\033[0;31mERRO! O usuário preferiu não informar os dados!\033[m') return 0 else: return valor # Programa principal i = leiaInt('Digite um número inteiro: ') #n = int(input(': ')) print(f'Você acabou de digitar o número inteiro: {i}!') r = leiaFloat('Digite um número real: ') print(f'Você acabou de digitar o número real: {r}') ''' Exercício 105 def notas(* n, sit=False): """ :param n: Lista de notas :param sit: (opcional) Se True, informará a situação de acordo com a média. :return: um dicionário com os dados. """ alunos = {} alunos['quant'] = len(n) alunos['maior'] = max(n) alunos['menor'] = min(n) alunos['media'] = sum(n)/len(n) if sit: if alunos['media'] >= 8: alunos['sit'] = 'ÓTIMA' elif 8 > alunos['media'] >= 6: alunos['sit'] = 'BOA' elif 6 > alunos['media'] > 3: alunos['sit'] = 'RUIM' else: alunos['sit'] = 'PÉSSIMA' return alunos resp = notas(9,7.5, 8, 7.5, 8, sit=True) print(resp) ''' """ Exercício 106 def ajuda(msg): print(help(msg)) while True: com = str(input('\033[035mDigite o comando poara consultar ajuda: [FIM para sair]\033[m')) if com in 'FIMfimFim': break else: ajuda(com) print('FIM!') """
py
1a3af550c3072fad293e3cf4006a416e25de8b3e
# -*- coding: utf-8 -*-

# Opções para formulários
ufs = ['SP', 'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT',
       'MS', 'MG', 'PR', 'PB', 'PA', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR',
       'SC', 'SE', 'TO']
escolaridade = ['Nenhuma', '1º grau', '2º grau', 'Superior']
cor = ['Branca', 'Negra', 'Parda', 'Indígena', 'Asiática']
estadocivil = ['Casada', 'Solteira (sem união estável)',
               'Solteira (com união estável)', 'Outra']
sexo = ['Feminino', 'Masculino']
tipo = ['Particular', 'Convênio']

db.define_table('pacientes',
                Field('nome', label='Nome', requires=IS_NOT_EMPTY()),
                Field('sexo', label='Sexo', default='Feminino', requires=IS_IN_SET(sexo)),
                Field('cpf', label='CPF'),
                Field('profissao', label='Profissão'),
                Field('nascimento', label='Data de nascimento', type='date',
                      requires=[IS_NOT_EMPTY(), IS_DATE(format='%d/%m/%Y')]),
                Field('telefone', label='Telefone', requires=IS_NOT_EMPTY()),
                Field('escolaridade', label='Escolaridade',
                      requires=IS_EMPTY_OR(IS_IN_SET(escolaridade))),
                Field('estadocivil', label='Estado civil',
                      requires=IS_EMPTY_OR(IS_IN_SET(estadocivil))),
                Field('cor', label='Cor', requires=IS_EMPTY_OR(IS_IN_SET(cor))),
                Field('image', 'upload', label='Foto'),
                Field('endereco', label='Endereço'),
                Field('cidade', label='Cidade'),
                Field('uf', label='UF', default='SP', requires=IS_EMPTY_OR(IS_IN_SET(ufs))),
                Field('cep', label='CEP'),
                Field('observacoes', label='Observações', type='text'),
                Field('tipo', label='Tipo de atendimento', requires=IS_IN_SET(tipo)),
                format='%(nome)s'
                )

db.pacientes.cpf.represent = lambda field, x: field if field else 'Não informado'


def BuscaPaciente(id):
    if not id:
        raise HTTP(404, 'ID paciente não encontrado')
    try:
        paciente = db(db.pacientes.id == id).select().first()
    except ValueError:
        raise HTTP(404, 'Argumento PACIENTE inválido')
    if not paciente:
        raise HTTP(404, 'Paciente não encontrado')

    if paciente.nascimento:
        paciente.nascimento = paciente.nascimento.strftime('%d/%m/%Y')

    NI = 'Não informado'
    campos = []
    campos.append(paciente.endereco)
    campos.append(paciente.cidade)
    campos.append(paciente.telefone)
    campos.append(paciente.escolaridade)
    campos.append(paciente.observacoes)
    campos.append(paciente.cpf)
    campos.append(paciente.uf)
    campos.append(paciente.estadocivil)
    campos.append(paciente.cor)
    campos.append(paciente.cep)

    for campo in campos:
        if not campo:
            campo = NI

    return paciente


def BuscaTodosPacientes():
    pacientes = db(db.pacientes).select()
    return pacientes
py
1a3af576d71abd883d333407437e0abe4c2e94f8
"""Utility functions for transducer models.""" import os import numpy as np import torch from espnet_pytorch_library.nets_utils import pad_list def prepare_loss_inputs(ys_pad, hlens, blank_id=0, ignore_id=-1): """Prepare tensors for transducer loss computation. Args: ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax) hlens (torch.Tensor): batch of hidden sequence lengthts (B) or batch of masks (B, 1, Tmax) blank_id (int): index of blank label ignore_id (int): index of initial padding Returns: ys_in_pad (torch.Tensor): batch of padded target sequences + blank (B, Lmax + 1) target (torch.Tensor): batch of padded target sequences (B, Lmax) pred_len (torch.Tensor): batch of hidden sequence lengths (B) target_len (torch.Tensor): batch of output sequence lengths (B) """ device = ys_pad.device ys = [y[y != ignore_id] for y in ys_pad] blank = ys[0].new([blank_id]) ys_in_pad = pad_list([torch.cat([blank, y], dim=0) for y in ys], blank_id) ys_out_pad = pad_list([torch.cat([y, blank], dim=0) for y in ys], ignore_id) target = pad_list(ys, blank_id).type(torch.int32).to(device) target_len = torch.IntTensor([y.size(0) for y in ys]).to(device) if torch.is_tensor(hlens): if hlens.dim() > 1: hs = [h[h != 0] for h in hlens] hlens = list(map(int, [h.size(0) for h in hs])) else: hlens = list(map(int, hlens)) pred_len = torch.IntTensor(hlens).to(device) return ys_in_pad, ys_out_pad, target, pred_len, target_len def valid_aux_task_layer_list(aux_layer_ids, enc_num_layers): """Check whether input list of auxiliary layer ids is valid. Return the valid list sorted with duplicated removed. Args: aux_layer_ids (list): Auxiliary layers ids enc_num_layers (int): Number of encoder layers Returns: valid (list): Validated list of layers for auxiliary task """ if ( not isinstance(aux_layer_ids, list) or not aux_layer_ids or not all(isinstance(layer, int) for layer in aux_layer_ids) ): raise ValueError("--aux-task-layer-list argument takes a list of layer ids.") sorted_list = sorted(aux_layer_ids, key=int, reverse=False) valid = list(filter(lambda x: 0 <= x < enc_num_layers, sorted_list)) if sorted_list != valid: raise ValueError( "Provided list of layer ids for auxiliary task is incorrect. " "IDs should be between [0, %d]" % (enc_num_layers - 1) ) return valid def is_prefix(x, pref): """Check prefix. Args: x (list): token id sequence pref (list): token id sequence Returns: (boolean): whether pref is a prefix of x. """ if len(pref) >= len(x): return False for i in range(len(pref)): if pref[i] != x[i]: return False return True def substract(x, subset): """Remove elements of subset if corresponding token id sequence exist in x. Args: x (list): set of hypotheses subset (list): subset of hypotheses Returns: final (list): new set """ final = [] for x_ in x: if any(x_.yseq == sub.yseq for sub in subset): continue final.append(x_) return final def select_lm_state(lm_states, idx, lm_layers, is_wordlm): """Get LM state from batch for given id. Args: lm_states (list or dict): batch of LM states idx (int): index to extract state from batch state lm_layers (int): number of LM layers is_wordlm (bool): whether provided LM is a word-LM Returns: idx_state (dict): LM state for given id """ if is_wordlm: idx_state = lm_states[idx] else: idx_state = {} idx_state["c"] = [lm_states["c"][layer][idx] for layer in range(lm_layers)] idx_state["h"] = [lm_states["h"][layer][idx] for layer in range(lm_layers)] return idx_state def create_lm_batch_state(lm_states_list, lm_layers, is_wordlm): """Create batch of LM states. 
Args: lm_states (list or dict): list of individual LM states lm_layers (int): number of LM layers is_wordlm (bool): whether provided LM is a word-LM Returns: batch_states (list): batch of LM states """ if is_wordlm: batch_states = lm_states_list else: batch_states = {} batch_states["c"] = [ torch.stack([state["c"][layer] for state in lm_states_list]) for layer in range(lm_layers) ] batch_states["h"] = [ torch.stack([state["h"][layer] for state in lm_states_list]) for layer in range(lm_layers) ] return batch_states def init_lm_state(lm_model): """Initialize LM state. Args: lm_model (torch.nn.Module): LM module Returns: lm_state (dict): initial LM state """ lm_layers = len(lm_model.rnn) lm_units_typ = lm_model.typ lm_units = lm_model.n_units p = next(lm_model.parameters()) h = [ torch.zeros(lm_units).to(device=p.device, dtype=p.dtype) for _ in range(lm_layers) ] lm_state = {"h": h} if lm_units_typ == "lstm": lm_state["c"] = [ torch.zeros(lm_units).to(device=p.device, dtype=p.dtype) for _ in range(lm_layers) ] return lm_state def recombine_hyps(hyps): """Recombine hypotheses with equivalent output sequence. Args: hyps (list): list of hypotheses Returns: final (list): list of recombined hypotheses """ final = [] for hyp in hyps: seq_final = [f.yseq for f in final if f.yseq] if hyp.yseq in seq_final: seq_pos = seq_final.index(hyp.yseq) final[seq_pos].score = np.logaddexp(final[seq_pos].score, hyp.score) else: final.append(hyp) return hyps def pad_sequence(seqlist, pad_token): """Left pad list of token id sequences. Args: seqlist (list): list of token id sequences pad_token (int): padding token id Returns: final (list): list of padded token id sequences """ maxlen = max(len(x) for x in seqlist) final = [([pad_token] * (maxlen - len(x))) + x for x in seqlist] return final def check_state(state, max_len, pad_token): """Check state and left pad or trim if necessary. Args: state (list): list of of L decoder states (in_len, dec_dim) max_len (int): maximum length authorized pad_token (int): padding token id Returns: final (list): list of L padded decoder states (1, max_len, dec_dim) """ if state is None or max_len < 1 or state[0].size(1) == max_len: return state curr_len = state[0].size(1) if curr_len > max_len: trim_val = int(state[0].size(1) - max_len) for i, s in enumerate(state): state[i] = s[:, trim_val:, :] else: layers = len(state) ddim = state[0].size(2) final_dims = (1, max_len, ddim) final = [state[0].data.new(*final_dims).fill_(pad_token) for _ in range(layers)] for i, s in enumerate(state): final[i][:, (max_len - s.size(1)) : max_len, :] = s return final return state def check_batch_state(state, max_len, pad_token): """Check batch of states and left pad or trim if necessary. Args: state (list): list of of L decoder states (B, ?, dec_dim) max_len (int): maximum length authorized pad_token (int): padding token id Returns: final (list): list of L decoder states (B, pred_len, dec_dim) """ final_dims = (len(state), max_len, state[0].size(1)) final = state[0].data.new(*final_dims).fill_(pad_token) for i, s in enumerate(state): curr_len = s.size(0) if curr_len < max_len: final[i, (max_len - curr_len) : max_len, :] = s else: final[i, :, :] = s[(curr_len - max_len) :, :] return final def custom_torch_load(model_path, model, training=True): """Load transducer model modules and parameters with training-only ones removed. 
Args: model_path (str): Model path model (torch.nn.Module): The model with pretrained modules """ if "snapshot" in os.path.basename(model_path): model_state_dict = torch.load( model_path, map_location=lambda storage, loc: storage )["model"] else: model_state_dict = torch.load( model_path, map_location=lambda storage, loc: storage ) if not training: model_state_dict = { k: v for k, v in model_state_dict.items() if not k.startswith("aux") } model.load_state_dict(model_state_dict) del model_state_dict
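
# --- Added usage sketch (not part of the original module) ---
# Small, hedged illustration of the pure-Python helpers defined above; the
# token id sequences are arbitrary examples, and no ESPnet model objects or
# tensors are involved.
#
#   is_prefix([1, 2, 3, 4], [1, 2])     -> True
#   is_prefix([1, 2, 3], [1, 2, 3])     -> False  (equal length is not a prefix)
#   pad_sequence([[1, 2, 3], [4]], 0)   -> [[1, 2, 3], [0, 0, 4]]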
py
1a3af98f1aec24540e093e555d9ed2b25f848dff
from diot import Diot
from bioprocs.utils.tsvio2 import TsvReader, TsvWriter

infile = {{i.infile | quote}}
outdir = {{o.outdir | quote}}
prefix = {{i.infile | stem2 | quote}}
ext = {{i.infile | ext | quote}}
inopts = {{args.inopts | repr}}
outopts = {{args.outopts | repr}}
by = {{args.by | ? :isinstance(_, str) and _.startswith('col:') | = :quote(_[4:]) | ! :_}}

reader = TsvReader(infile, **inopts)
outheader = outopts.pop('header', True)

by_type = None
if isinstance(by, int):
    by_type = 'size'
elif isinstance(by, str):
    by_type = 'column'
    if by.isdigit():
        by = int(by)
elif callable(by):
    by_type = 'callable'
else:
    raise ValueError('Unsupported `args.by = %r`' % by)


def get_writer(tag):
    outfile = outdir + "/" + prefix + "_" + tag + ext
    writer = TsvWriter(outfile, **outopts)
    if outheader is True:
        writer.cnames = reader.cnames
        if writer.cnames:
            writer.writeHead()
    elif outheader:
        writer.cnames = list(outheader)
        writer.writeHead()
    return writer


outfiles = {}
for i, r in enumerate(reader):
    if by_type == 'size':
        tag = int(i / by)
    elif by_type == 'column':
        tag = r[by]
    else:
        tag = by(r)

    if tag not in outfiles:
        outfiles[tag] = get_writer(tag)

    outfiles[tag].write(r)

reader.close()
for writer in outfiles.values():
    writer.close()
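
# --- Added usage note (not part of the original template script) ---
# The three splitting modes accepted via `args.by`, described from the code
# above; the column name 'Sample' is a made-up example:
#   by = 1000          -> 'size'    : rows are grouped in chunks of 1000, tag = chunk index
#   by = 'col:Sample'  -> 'column'  : one writer per distinct value of the 'Sample' column
#                                     (a digit string selects the column by index)
#   by = <callable>    -> 'callable': the callable receives each row and returns the tag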
py
1a3afbea9d8e1458eb28cdc50123d4e144b8c2ac
import os import re import sys import textwrap from typing import Dict from typing import Generator import pytest from _pytest.compat import TYPE_CHECKING from _pytest.monkeypatch import MonkeyPatch if TYPE_CHECKING: from typing import Type @pytest.fixture def mp() -> Generator[MonkeyPatch, None, None]: cwd = os.getcwd() sys_path = list(sys.path) yield MonkeyPatch() sys.path[:] = sys_path os.chdir(cwd) def test_setattr() -> None: class A: x = 1 monkeypatch = MonkeyPatch() pytest.raises(AttributeError, monkeypatch.setattr, A, "notexists", 2) monkeypatch.setattr(A, "y", 2, raising=False) assert A.y == 2 # type: ignore monkeypatch.undo() assert not hasattr(A, "y") monkeypatch = MonkeyPatch() monkeypatch.setattr(A, "x", 2) assert A.x == 2 monkeypatch.setattr(A, "x", 3) assert A.x == 3 monkeypatch.undo() assert A.x == 1 A.x = 5 monkeypatch.undo() # double-undo makes no modification assert A.x == 5 class TestSetattrWithImportPath: def test_string_expression(self, monkeypatch): monkeypatch.setattr("os.path.abspath", lambda x: "hello2") assert os.path.abspath("123") == "hello2" def test_string_expression_class(self, monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr("_pytest.config.Config", 42) import _pytest assert _pytest.config.Config == 42 # type: ignore def test_unicode_string(self, monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr("_pytest.config.Config", 42) import _pytest assert _pytest.config.Config == 42 # type: ignore monkeypatch.delattr("_pytest.config.Config") def test_wrong_target(self, monkeypatch): pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None)) def test_unknown_import(self, monkeypatch): pytest.raises(ImportError, lambda: monkeypatch.setattr("unkn123.classx", None)) def test_unknown_attr(self, monkeypatch): pytest.raises( AttributeError, lambda: monkeypatch.setattr("os.path.qweqwe", None) ) def test_unknown_attr_non_raising(self, monkeypatch: MonkeyPatch) -> None: # https://github.com/pytest-dev/pytest/issues/746 monkeypatch.setattr("os.path.qweqwe", 42, raising=False) assert os.path.qweqwe == 42 # type: ignore def test_delattr(self, monkeypatch): monkeypatch.delattr("os.path.abspath") assert not hasattr(os.path, "abspath") monkeypatch.undo() assert os.path.abspath def test_delattr(): class A: x = 1 monkeypatch = MonkeyPatch() monkeypatch.delattr(A, "x") assert not hasattr(A, "x") monkeypatch.undo() assert A.x == 1 monkeypatch = MonkeyPatch() monkeypatch.delattr(A, "x") pytest.raises(AttributeError, monkeypatch.delattr, A, "y") monkeypatch.delattr(A, "y", raising=False) monkeypatch.setattr(A, "x", 5, raising=False) assert A.x == 5 monkeypatch.undo() assert A.x == 1 def test_setitem(): d = {"x": 1} monkeypatch = MonkeyPatch() monkeypatch.setitem(d, "x", 2) monkeypatch.setitem(d, "y", 1700) monkeypatch.setitem(d, "y", 1700) assert d["x"] == 2 assert d["y"] == 1700 monkeypatch.setitem(d, "x", 3) assert d["x"] == 3 monkeypatch.undo() assert d["x"] == 1 assert "y" not in d d["x"] = 5 monkeypatch.undo() assert d["x"] == 5 def test_setitem_deleted_meanwhile() -> None: d = {} # type: Dict[str, object] monkeypatch = MonkeyPatch() monkeypatch.setitem(d, "x", 2) del d["x"] monkeypatch.undo() assert not d @pytest.mark.parametrize("before", [True, False]) def test_setenv_deleted_meanwhile(before): key = "qwpeoip123" if before: os.environ[key] = "world" monkeypatch = MonkeyPatch() monkeypatch.setenv(key, "hello") del os.environ[key] monkeypatch.undo() if before: assert os.environ[key] == "world" del os.environ[key] else: assert key not in os.environ def test_delitem() 
-> None: d = {"x": 1} # type: Dict[str, object] monkeypatch = MonkeyPatch() monkeypatch.delitem(d, "x") assert "x" not in d monkeypatch.delitem(d, "y", raising=False) pytest.raises(KeyError, monkeypatch.delitem, d, "y") assert not d monkeypatch.setitem(d, "y", 1700) assert d["y"] == 1700 d["hello"] = "world" monkeypatch.setitem(d, "x", 1500) assert d["x"] == 1500 monkeypatch.undo() assert d == {"hello": "world", "x": 1} def test_setenv(): monkeypatch = MonkeyPatch() with pytest.warns(pytest.PytestWarning): monkeypatch.setenv("XYZ123", 2) import os assert os.environ["XYZ123"] == "2" monkeypatch.undo() assert "XYZ123" not in os.environ def test_delenv(): name = "xyz1234" assert name not in os.environ monkeypatch = MonkeyPatch() pytest.raises(KeyError, monkeypatch.delenv, name, raising=True) monkeypatch.delenv(name, raising=False) monkeypatch.undo() os.environ[name] = "1" try: monkeypatch = MonkeyPatch() monkeypatch.delenv(name) assert name not in os.environ monkeypatch.setenv(name, "3") assert os.environ[name] == "3" monkeypatch.undo() assert os.environ[name] == "1" finally: if name in os.environ: del os.environ[name] class TestEnvironWarnings: """ os.environ keys and values should be native strings, otherwise it will cause problems with other modules (notably subprocess). On Python 2 os.environ accepts anything without complaining, while Python 3 does the right thing and raises an error. """ VAR_NAME = "PYTEST_INTERNAL_MY_VAR" def test_setenv_non_str_warning(self, monkeypatch): value = 2 msg = ( "Value of environment variable PYTEST_INTERNAL_MY_VAR type should be str, " "but got 2 (type: int); converted to str implicitly" ) with pytest.warns(pytest.PytestWarning, match=re.escape(msg)): monkeypatch.setenv(str(self.VAR_NAME), value) def test_setenv_prepend(): import os monkeypatch = MonkeyPatch() with pytest.warns(pytest.PytestWarning): monkeypatch.setenv("XYZ123", 2, prepend="-") assert os.environ["XYZ123"] == "2" with pytest.warns(pytest.PytestWarning): monkeypatch.setenv("XYZ123", 3, prepend="-") assert os.environ["XYZ123"] == "3-2" monkeypatch.undo() assert "XYZ123" not in os.environ def test_monkeypatch_plugin(testdir): reprec = testdir.inline_runsource( """ def test_method(monkeypatch): assert monkeypatch.__class__.__name__ == "MonkeyPatch" """ ) res = reprec.countoutcomes() assert tuple(res) == (1, 0, 0), res def test_syspath_prepend(mp: MonkeyPatch): old = list(sys.path) mp.syspath_prepend("world") mp.syspath_prepend("hello") assert sys.path[0] == "hello" assert sys.path[1] == "world" mp.undo() assert sys.path == old mp.undo() assert sys.path == old def test_syspath_prepend_double_undo(mp: MonkeyPatch): old_syspath = sys.path[:] try: mp.syspath_prepend("hello world") mp.undo() sys.path.append("more hello world") mp.undo() assert sys.path[-1] == "more hello world" finally: sys.path[:] = old_syspath def test_chdir_with_path_local(mp: MonkeyPatch, tmpdir): mp.chdir(tmpdir) assert os.getcwd() == tmpdir.strpath def test_chdir_with_str(mp: MonkeyPatch, tmpdir): mp.chdir(tmpdir.strpath) assert os.getcwd() == tmpdir.strpath def test_chdir_undo(mp: MonkeyPatch, tmpdir): cwd = os.getcwd() mp.chdir(tmpdir) mp.undo() assert os.getcwd() == cwd def test_chdir_double_undo(mp: MonkeyPatch, tmpdir): mp.chdir(tmpdir.strpath) mp.undo() tmpdir.chdir() mp.undo() assert os.getcwd() == tmpdir.strpath def test_issue185_time_breaks(testdir): testdir.makepyfile( """ import time def test_m(monkeypatch): def f(): raise Exception monkeypatch.setattr(time, "time", f) """ ) result = testdir.runpytest() 
result.stdout.fnmatch_lines( """ *1 passed* """ ) def test_importerror(testdir): p = testdir.mkpydir("package") p.join("a.py").write( textwrap.dedent( """\ import doesnotexist x = 1 """ ) ) testdir.tmpdir.join("test_importerror.py").write( textwrap.dedent( """\ def test_importerror(monkeypatch): monkeypatch.setattr('package.a.x', 2) """ ) ) result = testdir.runpytest() result.stdout.fnmatch_lines( """ *import error in package.a: No module named 'doesnotexist'* """ ) class Sample: @staticmethod def hello() -> bool: return True class SampleInherit(Sample): pass @pytest.mark.parametrize( "Sample", [Sample, SampleInherit], ids=["new", "new-inherit"], ) def test_issue156_undo_staticmethod(Sample: "Type[Sample]") -> None: monkeypatch = MonkeyPatch() monkeypatch.setattr(Sample, "hello", None) assert Sample.hello is None monkeypatch.undo() assert Sample.hello() def test_undo_class_descriptors_delattr(): class SampleParent: @classmethod def hello(_cls): pass @staticmethod def world(): pass class SampleChild(SampleParent): pass monkeypatch = MonkeyPatch() original_hello = SampleChild.hello original_world = SampleChild.world monkeypatch.delattr(SampleParent, "hello") monkeypatch.delattr(SampleParent, "world") assert getattr(SampleParent, "hello", None) is None assert getattr(SampleParent, "world", None) is None monkeypatch.undo() assert original_hello == SampleChild.hello assert original_world == SampleChild.world def test_issue1338_name_resolving(): pytest.importorskip("requests") monkeypatch = MonkeyPatch() try: monkeypatch.delattr("requests.sessions.Session.request") finally: monkeypatch.undo() def test_context(): monkeypatch = MonkeyPatch() import functools import inspect with monkeypatch.context() as m: m.setattr(functools, "partial", 3) assert not inspect.isclass(functools.partial) assert inspect.isclass(functools.partial) def test_syspath_prepend_with_namespace_packages(testdir, monkeypatch): for dirname in "hello", "world": d = testdir.mkdir(dirname) ns = d.mkdir("ns_pkg") ns.join("__init__.py").write( "__import__('pkg_resources').declare_namespace(__name__)" ) lib = ns.mkdir(dirname) lib.join("__init__.py").write("def check(): return %r" % dirname) monkeypatch.syspath_prepend("hello") import ns_pkg.hello assert ns_pkg.hello.check() == "hello" with pytest.raises(ImportError): import ns_pkg.world # Prepending should call fixup_namespace_packages. monkeypatch.syspath_prepend("world") import ns_pkg.world assert ns_pkg.world.check() == "world" # Should invalidate caches via importlib.invalidate_caches. tmpdir = testdir.tmpdir modules_tmpdir = tmpdir.mkdir("modules_tmpdir") monkeypatch.syspath_prepend(str(modules_tmpdir)) modules_tmpdir.join("main_app.py").write("app = True") from main_app import app # noqa: F401
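

# --- Added example test (not part of the original suite) ---
# A minimal, hedged sketch of the most common MonkeyPatch calls exercised
# above, written as one standalone test. The patched names (an env var and a
# fake module key) are made up; only the `monkeypatch` fixture itself comes
# from pytest, and every change is undone automatically after the test.
def test_monkeypatch_usage_sketch(monkeypatch: MonkeyPatch) -> None:
    monkeypatch.setenv("PYTEST_INTERNAL_SKETCH", "1")
    assert os.environ["PYTEST_INTERNAL_SKETCH"] == "1"
    monkeypatch.setattr(os.path, "exists", lambda p: True)
    assert os.path.exists("surely/not/a/real/path")
    with monkeypatch.context() as m:
        m.setitem(sys.modules, "fake_mod_for_sketch", object())
        assert "fake_mod_for_sketch" in sys.modules
    # the context manager undoes its own changes on exit
    assert "fake_mod_for_sketch" not in sys.modules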
py
1a3afc01b07c882d06c722513cbb5ccc3caa198e
import os


def screen_clear():
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')


screen_clear()
print("Starting....\n")

import time
import cv2
import face_recognition
import numpy

print("Dependencies imported Successfully")
print("Initializing Camera")
cap = cv2.VideoCapture(0)
print("Camera Initialized Successfully")
time.sleep(2)

knownFaces = []
knownNames = []

while True:
    screen_clear()
    print("\n\n\t\t\tSMART SURVEILLANCE SYSTEM")
    n = input("\n\n[Press 1] To Start SURVEILLANCE\n[Press 2] To Register new Face\n[Press 99] To exit\n\n\nEnter Your choice : ")

    if n == '1':
        while True:
            success, img = cap.read()
            locs = face_recognition.face_locations(img)
            encoding = face_recognition.face_encodings(img, locs)
            cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            for face, loc in zip(encoding, locs):
                result = face_recognition.compare_faces(knownFaces, face, 0.5)
                match = "UNKNOWN"
                if True in result:
                    match = knownNames[result.index(True)]
                cv2.rectangle(img, (loc[3], loc[0]), (loc[1], loc[2]), (0, 255, 0), 2)
                cv2.rectangle(img, (loc[3], loc[2] - 25), (loc[1], loc[2]), (0, 255, 0), cv2.FILLED)
                cv2.putText(img, match, (loc[3] + 6, loc[2] - 6), cv2.FONT_HERSHEY_COMPLEX, 0.65, (255, 255, 255), 1)
            cv2.imshow("Searching", img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyWindow("Searching")
                break

    elif n == '2':
        name = input("Enter Your Name : ")
        while True:
            success, img = cap.read()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            loc = face_recognition.face_locations(img)
            if len(loc) == 1:
                cv2.rectangle(img, (loc[0][3], loc[0][0]), (loc[0][1], loc[0][2]), (255, 255, 255), 1)
            elif len(loc) < 1:
                print("NO face Detected")
            else:
                print("Many faces Detected")
                for i in loc:
                    cv2.rectangle(img, (i[3], i[0]), (i[1], i[2]), (255, 0, 255), 2)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imshow("VideoCapture", img)
            if cv2.waitKey(1) & 0xFF == ord('q') and len(loc) == 1:
                encode = face_recognition.face_encodings(img, loc)[0]
                knownFaces.append(encode)
                knownNames.append(name)
                cv2.destroyWindow("VideoCapture")
                break
        print(loc)
        print(knownFaces)
        print(knownNames)

    elif n == '99':
        screen_clear()
        print("\n\n\n\t\t\t[Exiting...] ThankYou\n\n\n\n\n\n\n\n\n ")
        exit()

    else:
        screen_clear()
        print("\n\n\n\t\t\tWrong Choice")
        time.sleep(2)
py
1a3afcdb7b01f0463f94d71a737cf7af3dfb84f2
from django.shortcuts import render

# Create your views here.
from datetime import datetime

from info.forms import CovidDataForm
from info.models import CovidData
from django.views.generic import ListView, CreateView
from django import forms

# REST
from django.http import HttpResponse
from django.shortcuts import redirect, get_object_or_404
from rest_framework import status, viewsets
from rest_framework.views import APIView

from .serializers import CovidDataSerializer


class HomeListViewNew(ListView):
    queryset = CovidData.objects.all()
    template_name = "info/home_new.html"


class CovidDataCreateForm(forms.ModelForm):
    class Meta:
        model = CovidData
        fields = '__all__'
        # fields = ['country_region', 'province_state', 'fips', 'active_cases']


class CovidDataCreateViewV2(CreateView):
    model = CovidData
    form_class = CovidDataCreateForm
    template_name = "info/covid_create.html"


def about(request):
    return render(request, "info/about.html")


def contact(request):
    return render(request, "info/contact.html")


# Add this code elsewhere in the file:
def logdata(request):
    form = CovidDataForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            message = form.save(commit=False)
            message.save()
            return redirect("home-new")
    else:
        return render(request, "info/enter_data.html", {"form": form})


class CovidDataViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    queryset = CovidData.objects.all()
    serializer_class = CovidDataSerializer
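

# --- Added wiring sketch (not part of the original views module) ---
# Hedged example of how `CovidDataViewSet` is typically exposed through a DRF
# router; the module path `info/urls.py` and the route prefix `coviddata` are
# assumptions, not names taken from this project.
#
#   # info/urls.py (hypothetical)
#   from django.urls import include, path
#   from rest_framework import routers
#   from . import views
#
#   router = routers.DefaultRouter()
#   router.register(r'coviddata', views.CovidDataViewSet)
#
#   urlpatterns = [
#       path('api/', include(router.urls)),
#   ]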