max_stars_repo_path (string, 4 to 245 chars) | max_stars_repo_name (string, 7 to 115 chars) | max_stars_count (int64, 101 to 368k) | id (string, 2 to 8 chars) | content (string, 6 to 1.03M chars) |
---|---|---|---|---|
objectModel/Python/tests/cdm/cdm_collection/test_cdm_trait_collection.py | Microsoft/CDM | 265 | 11129965 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import cast
import unittest
from cdm.utilities import Constants
from tests.common import async_test
from cdm.objectmodel import CdmTraitDefinition, CdmTraitReference
from .cdm_collection_helper_functions import generate_manifest
class CdmTraitCollectionTests(unittest.TestCase):
@async_test
def test_cdm_trait_collection_add(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
manifest._trait_cache = dict()
added_trait = manifest.exhibits_traits.append(trait)
added_other_trait = manifest.exhibits_traits.append(other_trait)
list_of_args = [[Constants._INCREMENTAL_PATTERN_PARAMETER_NAME, 'test'], ['fullDataPartitionPatternName', 'name']]
added_incremental_trait = manifest.exhibits_traits.append(Constants._INCREMENTAL_TRAIT_NAME, list_of_args)
self.assertEqual(None, manifest._trait_cache)
self.assertEqual(3, len(manifest.exhibits_traits))
self.assertEqual(trait, manifest.exhibits_traits[0].explicit_reference)
self.assertEqual(other_trait, manifest.exhibits_traits[1].explicit_reference)
self.assertEqual(added_trait, manifest.exhibits_traits[0])
self.assertEqual(added_other_trait, manifest.exhibits_traits[1])
self.assertEqual(added_incremental_trait, manifest.exhibits_traits[2])
self.assertEqual(2, len(manifest.exhibits_traits[2].arguments))
self.assertEqual('test', manifest.exhibits_traits[2].arguments.fetch_value(Constants._INCREMENTAL_PATTERN_PARAMETER_NAME))
self.assertEqual('name', manifest.exhibits_traits[2].arguments.fetch_value('fullDataPartitionPatternName'))
self.assertEqual(manifest, manifest.exhibits_traits[0].owner)
@async_test
def test_cdm_trait_collection_insert(self):
manifest = generate_manifest()
trait = CdmTraitReference(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitReference(manifest.ctx, 'Name of other Trait', None)
manifest._trait_cache = dict()
manifest.exhibits_traits.insert(0, trait)
manifest.exhibits_traits.insert(0, other_trait)
self.assertEqual(None, manifest._trait_cache)
self.assertEqual(2, len(manifest.exhibits_traits))
self.assertEqual(other_trait, manifest.exhibits_traits[0])
self.assertEqual(trait, manifest.exhibits_traits[1])
self.assertEqual(manifest, manifest.exhibits_traits[0].owner)
@async_test
def test_cdm_trait_collection_add_range(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
trait_list = [trait, other_trait]
manifest.exhibits_traits.extend(trait_list)
self.assertEqual(2, len(manifest.exhibits_traits))
self.assertEqual(trait, manifest.exhibits_traits[0].explicit_reference)
self.assertEqual(other_trait, manifest.exhibits_traits[1].explicit_reference)
self.assertEqual(manifest, manifest.exhibits_traits[0].owner)
@async_test
def test_cdm_trait_collection_remove(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
self.assertEqual(2, len(manifest.exhibits_traits))
manifest._trait_cache = dict()
manifest.exhibits_traits.remove(trait)
self.assertEqual(1, len(manifest.exhibits_traits))
self.assertEqual(None, manifest._trait_cache)
manifest.exhibits_traits.remove(trait)
self.assertEqual(1, len(manifest.exhibits_traits))
self.assertEqual(other_trait, manifest.exhibits_traits[0].explicit_reference)
manifest.exhibits_traits.remove('Name of other Trait')
self.assertEqual(0, len(manifest.exhibits_traits))
manifest.exhibits_traits.append(trait)
self.assertEqual(1, len(manifest.exhibits_traits))
manifest.exhibits_traits.remove(manifest.exhibits_traits[0])
self.assertEqual(0, len(manifest.exhibits_traits))
@async_test
def test_cdm_trait_collection_remove_at(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
manifest.exhibits_traits.remove(trait)
self.assertTrue(trait not in manifest.exhibits_traits)
self.assertEqual(None, manifest._trait_cache)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.pop(1)
self.assertEqual(None, manifest._trait_cache)
self.assertEqual(1, len(manifest.exhibits_traits))
self.assertEqual(other_trait, manifest.exhibits_traits[0].explicit_reference)
@async_test
def test_cdm_trait_collection_index_of(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
index = manifest.exhibits_traits.index(trait)
self.assertEqual(0, index)
index = manifest.exhibits_traits.index(other_trait)
self.assertEqual(1, index)
index = manifest.exhibits_traits.index(manifest.exhibits_traits[0])
self.assertEqual(0, index)
index = manifest.exhibits_traits.index(manifest.exhibits_traits[1])
self.assertEqual(1, index)
index = manifest.exhibits_traits.index('TraitName')
self.assertEqual(0, index)
index = manifest.exhibits_traits.index('Name of other Trait')
self.assertEqual(1, index)
@async_test
def test_cdm_trait_collection_remove_only_from_property(self):
manifest = generate_manifest()
trait = CdmTraitReference(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitReference(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
self.assertFalse(trait.is_from_property)
self.assertFalse(other_trait.is_from_property)
self.assertEqual(2, len(manifest.exhibits_traits))
manifest.exhibits_traits.remove(trait, True)
self.assertTrue(trait in manifest.exhibits_traits)
self.assertEqual(2, len(manifest.exhibits_traits))
other_trait.is_from_property = True
manifest.exhibits_traits.remove(other_trait, True)
self.assertTrue(other_trait not in manifest.exhibits_traits)
self.assertEqual(1, len(manifest.exhibits_traits))
self.assertEqual(trait, manifest.exhibits_traits[0])
@async_test
def test_cdm_trait_collection_remove_prioritize_from_property(self):
manifest = generate_manifest()
trait = CdmTraitReference(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitReference(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
trait_copy_from_property = CdmTraitReference(manifest.ctx, 'TraitName', None)
trait_copy_from_property.is_from_property = True
manifest.exhibits_traits.append(trait_copy_from_property)
self.assertEqual(3, len(manifest.exhibits_traits))
manifest.exhibits_traits.remove('TraitName')
self.assertTrue(trait_copy_from_property not in manifest.exhibits_traits)
self.assertEqual(2, len(manifest.exhibits_traits))
self.assertEqual(trait, manifest.exhibits_traits[0])
self.assertEqual(other_trait, manifest.exhibits_traits[1])
@async_test
def test_cdm_trait_collection_remove_trait_definition_prioritize_from_property(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits[2].is_from_property = True
manifest.exhibits_traits.append(other_trait)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits[4].is_from_property = True
manifest.exhibits_traits.append(other_trait)
self.assertEqual(6, len(manifest.exhibits_traits))
self.assertTrue(manifest.exhibits_traits[2].is_from_property)
manifest.exhibits_traits.remove(trait)
self.assertEqual('TraitName', cast('CdmTraitDefinition', manifest.exhibits_traits[0].explicit_reference).trait_name)
self.assertEqual('Name of other Trait', manifest.exhibits_traits[2].explicit_reference.trait_name)
self.assertEqual('TraitName', manifest.exhibits_traits[3].explicit_reference.trait_name)
@async_test
def test_cdm_trait_collection_index_of_only_from_property(self):
manifest = generate_manifest()
trait = CdmTraitDefinition(manifest.ctx, 'TraitName', None)
other_trait = CdmTraitDefinition(manifest.ctx, 'Name of other Trait', None)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
self.assertFalse(manifest.exhibits_traits[0].is_from_property)
self.assertFalse(manifest.exhibits_traits[1].is_from_property)
index = manifest.exhibits_traits.index(trait.trait_name, True)
self.assertEqual(-1, index)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
manifest.exhibits_traits.append(trait)
manifest.exhibits_traits.append(other_trait)
self.assertEqual(6, len(manifest.exhibits_traits))
manifest.exhibits_traits[2].is_from_property = True
index = manifest.exhibits_traits.index(trait.trait_name, True)
self.assertEqual(2, index)
index = manifest.exhibits_traits.index(trait.trait_name)
self.assertEqual(2, index)
@async_test
def test_cdm_trait_collection_clear(self):
manifest = generate_manifest()
manifest.exhibits_traits.append('trait1')
manifest.exhibits_traits.append('trait2')
manifest._trait_cache = dict()
manifest.exhibits_traits.clear()
self.assertEqual(0, len(manifest.exhibits_traits))
self.assertEqual(None, manifest._trait_cache)
|
django_pgviews/__init__.py | djr5/django-pgviews | 182 | 11129990 | default_app_config = 'django_pgviews.apps.ViewConfig'
|
pyNastran/dev/bdf_vectorized/cards/deqatn.py | luzpaz/pyNastran | 293 | 11130014 | # coding: utf-8
"""
Defines the DEQATN class and sub-functions.
The capitalization of the sub-functions is important.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from numpy import (
cos, sin, tan, log, log10, mean, exp, sqrt, square, mod, abs, sum,
arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2,
arcsinh as asinh, arccosh as acosh, arctanh as atanh)
# atan2h
from numpy.linalg import norm # type: ignore
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.cards.deqatn import lines_to_eqs
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def pi(num):
"""weird way to multiply p by a number"""
return np.pi * num
def rss(*args): # good
"""2-norm; generalized magnitude of vector for N components"""
return norm(args)
def avg(*args):
"""average"""
return np.mean(args)
def ssq(*args):
"""sum of squares"""
return np.square(args).sum()
def logx(x, y):
"""log base_x(y)"""
return np.log(y) / np.log(x)
def dim(x, y):
"""positive difference"""
return x - min(x, y)
def db(p, pref):
"""sound pressure in decibels"""
return 20. * np.log(p / pref)
def atan2h(x, y):
raise NotImplementedError()
def invdb(dbi, pref):
"""inverse Db"""
return 10. ** (dbi / 20. + log(pref))
def dba(p, pref, f):
"""
sound pressure in decibels (perceived)
Parameters
----------
p : float
structural responses or acoustic pressure
f : float
forcing frequency
pref : float
reference pressure
Returns
-------
dbi : float
acoustic pressure in Decibels
"""
ta1, ta2 = _get_ta(f)
return 20. * np.log(p / pref) + 10 * log(ta1) + 10. * log(ta2)
def invdba(dbai, pref, f):
"""
Inverse Dba
Parameters
----------
dbai : float
acoustic pressure in Decibels (perceived)
f : float
forcing frequency
pref : float
reference pressure
Returns
-------
p : float
structural responses or acoustic pressure
"""
ta1, ta2 = _get_ta(f)
#dbai = dba(p, pref, f)
return 10. ** ((dbai - 10. * log(ta1) - 10. * log(ta2))/20)
def _get_ta(f):
"""gets the factors for dba, invdba"""
k1 = 2.242882e16
k3 = 1.562339
p1 = 20.598997
p2 = 107.65265
p3 = 737.86223
p4 = 12194.22
ta1 = k3 * f**4 / ((f**2 + p2**2) * (f**2 + p3**2))
ta2 = k1 * f**4 / ((f**2 + p1**2)**2 * (f**2 + p4**2)**2)
return ta1, ta2
class DEQATN(BaseCard): # needs work...
"""
Design Equation Definition
Defines one or more equations for use in design sensitivity analysis.
+--------+------+-----+-----+-----+-----+-------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+========+======+=====+=====+=====+=====+=======+=====+
| DEQATN | EQID | EQUATION |
+--------+------+-------------------------------------+
| | EQUATION (cont.) |
+--------+--------------------------------------------+
"""
type = 'DEQATN'
def __init__(self, equation_id, eqs, comment=''):
"""
Creates a DEQATN card
Parameters
----------
equation_id : int
the id of the equation
eqs : List[str]
the equations, which may overbound the field
split them by a semicolon (;)
comment : str; default=''
a comment for the card
DEQATN 41 F1(A,B,C,D,R) = A+B *C-(D**3 + 10.0) + sin(PI(1) * R)
+ A**2 / (B - C); F = A + B - F1 * D
def F1(A, B, C, D, R):
F1 = A+B *C-(D**3 + 10.0) + sin(PI(1) * R) + A**2 / (B - C)
F = A + B - F1 * D
return F
eqs = [
'F1(A,B,C,D,R) = A+B *C-(D**3 + 10.0) + sin(PI(1) * R) + A**2 / (B - C)',
'F = A + B - F1 * D',
]
>>> deqatn = DEQATN(41, eq, comment='')
"""
if comment:
self.comment = comment
self.model = None
#self.dtable = None
self.func = None
#self.name = name
self.equation_id = equation_id
self.eqs = eqs
self.func_str = ''
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a DEQATN card from ``BDF.add_card(...)``
Parameters
----------
card : List[str]
this card is special and is not a ``BDFCard`` like other cards
comment : str; default=''
a comment for the card
"""
#print(card)
line0 = card[0]
if '\t' in line0:
line0 = line0.expandtabs()
name_eqid = line0[:16]
#print('name_eqid = %r' % name_eqid)
assert ',' not in name_eqid, name_eqid
try:
name, eq_id = name_eqid.split()
assert name.strip().upper() == 'DEQATN', card
except ValueError:
msg = 'cannot split %r\n' % name_eqid
msg += "Expected data of the form 'DEQATN 100'\n"
msg += 'card=%s' % card
raise ValueError(msg)
equation_id = int(eq_id)
# combine the equations into a single organized block
line0_eq = line0[16:]
eqs_temp = [line0_eq] + card[1:]
eqs = lines_to_eqs(eqs_temp)
return DEQATN(equation_id, eqs, comment=comment)
def _setup_equation(self):
"""
creates an executable equation object from self.eqs
x = 10.
>>> deqatn.func(x)
42.0
>>> deqatn.func_str
def stress(x):
x = float(x)
return x + 32.
"""
default_values = {}
dtable_ref = self.model.dtable
if dtable_ref is not None:
default_values = dtable_ref.default_values
func_name, nargs, func_str = fortran_to_python(
self.eqs, default_values, str(self))
self.func_str = func_str
self.func_name = func_name
exec(func_str)
#print(locals().keys())
func = locals()[func_name]
setattr(self, func_name, func)
#print(func)
self.func = func
self.nargs = nargs
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
self.model = model
# TODO: get defaults from DTABLE
# TODO: get limits from DCONSTR
#self.dtable = model.dtable
#self.dtable_ref = self.dtable
self._setup_equation()
def uncross_reference(self) -> None:
del self.model
del self.func
del self.f
# del getattr(self, self.func_name)
del self.func_name
del self.nargs
#del self.dtable
def evaluate(self, *args):
"""Makes a call to self.func"""
#args2 = args[:self.nargs]
#print('args =', args2)
if len(args) > self.nargs:
msg = 'len(args) > nargs\n'
msg += 'nargs=%s len(args)=%s; func_name=%s' % (
self.nargs, len(args), self.func_name)
raise RuntimeError(msg)
return self.func(*args)
#self.func(*args)
def raw_fields(self):
return [self.write_card()]
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
#self.evaluate(1, 2)
eqs = split_equations(self.eqs)
equation_line0 = eqs[0]
#assert len(equation_line0) <= 56, equation_line0
msg = 'DEQATN %-8i%-56s\n' % (self.equation_id, equation_line0)
assert len(equation_line0) <= 56, equation_line0
for eq in eqs[1:]:
msg += ' %-64s\n' % eq
assert len(eq) <= 64, eq
#print(msg)
return msg
def split_equations(lines):
"""takes an overbounded DEQATN card and shortens it"""
# first line must be < 56
# second line may be < 64
lines2 = []
for i, line in enumerate(lines):
#print('-------------------------')
# we'll add ; to the end of each line
if i == 0:
lines2 += _split_equation([], line.strip() + ';', 56)
else:
lines2 += _split_equation([], line.strip() + ';', 64)
# remove the trailing semicolon
lines2[-1] = lines2[-1][:-1]
return lines2
def _split_equation(lines_out, line, n, isplit=0):
"""
Takes an overbounded DEQATN line and shortens it using recursion
Parameters
----------
lines_out : List[str]
len(lines) = 0 : first iteration
len(lines) = 1 : second iteration
line : str
the line to split
n : int
the maximum number of characters allowed
the first line of the DEQATN has a different number of fields
allowed vs. subsequent lines
isplit : int; default=0
the number of levels deep in the recursive function we are
Returns
-------
lines_out : List[str]
the long line broken into shorter lines
"""
#print('n=%s -> line=%r len=%s' % (n, line, len(line)))
if len(line) <= n:
lines_out.append(line.strip())
return lines_out
# equation must be split
line0 = line[:n][::-1].replace('**', '^')
# fore, aft = line0.split('+-()*', 1)
#print('line0 = %r; len=%s' % (str(line0[::-1]), len(line0)))
out = {}
for operator in ('+', '*', '^', '-', ')', ',', '='):
if operator in line0:
i = line0.index(operator)
out[i] = operator
try:
imin = min(out)
except ValueError:
msg = "Couldn't find an operator ()+-/*= in %r\n" % line[n:]
msg += 'line = %r' % line
raise ValueError(msg)
operator = out[imin]
#print('operator = %r' % operator)
fore, aft = line0.split(operator, 1)
i = len(aft) + 1
line_out = line[:i]
#print('appending %r; len=%s' % (line_out, len(line_out)))
#print('fore = %r' % fore[::-1])
#print('aft = %r' % aft[::-1])
lines_out.append(line_out.replace('^', '**').strip())
isplit += 1
if isplit > 10:
raise RuntimeError()
lines_out = _split_equation(lines_out, line[i:], n, isplit+1)
return lines_out
def fortran_to_python_short(line, default_values):
"""the function used by the DRESP2"""
func_str = 'def func(args):\n'
func_str += ' return %s(args)\n' % line.strip()
d = {}
exec(func_str, globals(), d)
return d['func']
def fortran_to_python(lines, default_values, comment=''):
"""
Creates the python function
Parameters
----------
lines : List[str]
the equations to write broken up by statement
default_values : dict[name] = value
the default values from the DTABLE card
def f(x, y=10.):
'''
$ deqatn
DEQATN 1000 f(x,y) = x+y
'''
try:
if isinstance(x, (int, float, str)):
x = float(x)
if isinstance(y, (int, float, str)):
y = float(y)
except Exception:
print(locals())
raise
f = x + y
return f
"""
msg = ''
variables = []
assert len(lines) > 0, lines
for i, line in enumerate(lines):
#print('--------------------')
line = line.lower()
try:
# f(x, y) = 10.
# f(x, y) = abs(x) + y
# f = 42.
f, eq = line.split('=')
except Exception:
if '=' not in line:
raise SyntaxError('= not found in %r' % (line))
else:
msg = 'only 1 = sign may be found on a line\n'
msg += 'line = %r\n' % line
if len(lines) > 1:
msg += 'lines:\n%s' % '\n'.join(lines)
raise SyntaxError(msg)
f = f.strip()
eq = eq.strip().rstrip(';')
#print('f=%r eq=%r' % (f, eq))
if i == 0:
func_name, f, msg, out, variables = write_function_header(
f, eq, default_values, comment)
#print(msg)
else:
out = f
msg += ' %s = %s\n' % (out, eq)
msg += ' return %s' % f
#print(msg)
nargs = len(variables)
return func_name, nargs, msg
def write_function_header(f, eq, default_values, comment=''):
"""
initializes the python function
def f(x, y=10.):
'''
$ deqatn
DEQATN 1000 f(x,y) = x+y
'''
try:
if isinstance(x, (int, float, str)):
x = float(x)
if isinstance(y, (int, float, str)):
y = float(y)
except Exception:
print(locals())
raise
Parameters
----------
f : str
the function header
f(a, b, c)
eq : str
the value on the other side of the equals sign (f=eq)
1.
max(a, b, c)
default_values : dict[name] = value
the default values from the DTABLE card
Returns
-------
func_name : str
the name of the function ``f``
msg : str
see above
variables : List[str]
the variables used by the equation header
a, b, c
"""
msg = ''
out = ''
try:
float(eq)
is_float = True
except ValueError:
is_float = False
if is_float:
#print('float', eq)
func_name, arguments = f.strip('(,)').split('(')
func_name = func_name.strip(' ')
variables = arguments.split(',')
#print('func_name=%r' % func_name)
#val = float(eq)
msg += _write_function_line(func_name, variables, default_values)
msg += _write_comment(comment)
msg += _write_variables(variables)
msg += ' %s = %s\n' % (func_name, eq)
else:
#print('not float', eq)
#print(eq)
#asdf
func_name, arguments = f.strip('(,)').split('(')
func_name = func_name.strip(' ')
variables = arguments.split(',')
#msg += 'def %s:\n' % f
msg += _write_function_line(func_name, variables, default_values)
msg += _write_comment(comment)
msg += _write_variables(variables)
#for var in variables:
#msg += ' %s = float(%s)\n' % (var, var)
#print(msg)
#is_eq_defined = True
#print('out = %r' % out)
#print('func_name = %r' % func_name)
#print('eq = %r' % eq)
#out += eq
msg += ' %s = %s\n' % (func_name, eq)
#f = eq
return func_name, f, msg, out, variables
def _write_function_line(func_name, variables, default_values):
"""writes the ``def f(x, y, z=1.):`` part of the function"""
vals = []
is_default = False
#print('default_values = %s' % default_values)
for var in variables:
if var in default_values:
vals.append('%s=%s' % (var, default_values[var]))
is_default = True
else:
vals.append('%s' % (var))
if is_default:
msg = 'default variables must be set at the end of the function\n'
msg += 'variables = %s\n' % variables
msg += 'default_values = %s' % default_values
raise RuntimeError(msg)
vals2 = ', '.join(vals)
msg = 'def %s(%s):\n' % (func_name, vals2)
return msg
def _write_comment(comment):
"""writes the deqatn to the comment block"""
lines = comment.split('\n')
msgi = '\n '.join(lines)
msg = ' """\n %s"""\n' % msgi
return msg
def _write_variables(variables):
"""type checks the inputs"""
msg = ' try:\n'
for var in variables:
#msg += " assert isinstance(%s, float), '%s is not a float; type(%s)=%s' % (%s)")
#msg += ' %s = float(%s)\n' % (var, var)
msg += ' if isinstance(%s, (int, float, str)):\n' % var
msg += ' %s = float(%s)\n' % (var, var)
msg += ' except Exception:\n'
msg += ' print(locals())\n'
msg += ' raise\n'
return msg
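# --- Editor's note: a minimal usage sketch added for illustration; it is not
# part of the original pyNastran source. It only exercises helpers defined
# above in this module, and the sample values are made up.
def _demo_deqatn_helpers():  # pragma: no cover
    print(rss(3., 4.))       # 2-norm of (3, 4) -> 5.0
    print(avg(2., 4., 6.))   # mean -> 4.0
    print(ssq(1., 2., 3.))   # sum of squares -> 14.0
    # wrap the module-level ``avg`` into a tiny DRESP2-style callable
    func = fortran_to_python_short('avg', {})
    print(func((1., 2., 3.)))  # -> 2.0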
|
venv/lib/python3.8/site-packages/setuptools/_vendor/packaging/requirements.py | Joshua-Barawa/My-Photos | 38,667 | 11130018 | <filename>venv/lib/python3.8/site-packages/setuptools/_vendor/packaging/requirements.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from setuptools.extern.pyparsing import Literal as L # noqa
from setuptools.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r"[^ ]+")("url")
URL = AT + URI
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(
VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
# setuptools.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
# issue #104
REQUIREMENT.parseString("x[]")
class Requirement(object):
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string):
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
'Parse error at "{0!r}": {1}'.format(
requirement_string[e.loc : e.loc + 8], e.msg
)
)
self.name = req.name
if req.url:
parsed_url = urlparse.urlparse(req.url)
if parsed_url.scheme == "file":
if urlparse.urlunparse(parsed_url) != req.url:
raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc
):
raise InvalidRequirement("Invalid URL: {0}".format(req.url))
self.url = req.url
else:
self.url = None
self.extras = set(req.extras.asList() if req.extras else [])
self.specifier = SpecifierSet(req.specifier)
self.marker = req.marker if req.marker else None
def __str__(self):
parts = [self.name]
if self.extras:
parts.append("[{0}]".format(",".join(sorted(self.extras))))
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append("@ {0}".format(self.url))
if self.marker:
parts.append(" ")
if self.marker:
parts.append("; {0}".format(self.marker))
return "".join(parts)
def __repr__(self):
return "<Requirement({0!r})>".format(str(self))
|
integration-tests/test_psql_parity.py | boazberman/arrow-datafusion | 1,801 | 11130023 | <reponame>boazberman/arrow-datafusion
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import subprocess
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
pg_db, pg_user, pg_host, pg_port = [
os.environ.get(i)
for i in (
"POSTGRES_DB",
"POSTGRES_USER",
"POSTGRES_HOST",
"POSTGRES_PORT",
)
]
CREATE_TABLE_SQL_FILE = "integration-tests/create_test_table.sql"
def generate_csv_from_datafusion(fname: str):
return subprocess.check_output(
[
"./target/debug/datafusion-cli",
"-f",
CREATE_TABLE_SQL_FILE,
"-f",
fname,
"--format",
"csv",
"-q",
],
)
def generate_csv_from_psql(fname: str):
return subprocess.check_output(
[
"psql",
"-d",
pg_db,
"-h",
pg_host,
"-p",
pg_port,
"-U",
pg_user,
"-X",
"--csv",
"-f",
fname,
]
)
root = Path(os.path.dirname(__file__)) / "sqls"
test_files = set(root.glob("*.sql"))
class TestPsqlParity:
def test_tests_count(self):
assert len(test_files) == 21, "tests are missed"
@pytest.mark.parametrize("fname", test_files)
def test_sql_file(self, fname):
datafusion_output = pd.read_csv(io.BytesIO(generate_csv_from_datafusion(fname)))
psql_output = pd.read_csv(io.BytesIO(generate_csv_from_psql(fname)))
np.testing.assert_allclose(datafusion_output, psql_output, equal_nan=True)
|
judge/tasks.py | sokoide/OnlineJudge | 5,237 | 11130031 | <gh_stars>1000+
import dramatiq
from account.models import User
from submission.models import Submission
from judge.dispatcher import JudgeDispatcher
from utils.shortcuts import DRAMATIQ_WORKER_ARGS
@dramatiq.actor(**DRAMATIQ_WORKER_ARGS())
def judge_task(submission_id, problem_id):
uid = Submission.objects.get(id=submission_id).user_id
if User.objects.get(id=uid).is_disabled:
return
JudgeDispatcher(submission_id, problem_id).judge()
|
pyts/transformation/__init__.py | jmrichardson/pyts | 1,217 | 11130036 | """The :mod:`pyts.transformation` module includes transformation algorithms."""
from .bag_of_patterns import BagOfPatterns
from .boss import BOSS
from .rocket import ROCKET
from .shapelet_transform import ShapeletTransform
from .weasel import WEASEL
__all__ = ['BagOfPatterns', 'BOSS', 'ROCKET', 'ShapeletTransform', 'WEASEL']
|
parsers/test_parser.py | plympton/newsdiffs | 317 | 11130041 | #!/usr/bin/python
"""
Test a parser. For example:
$ python test_parser.py nyt.NYTParser
[list of URLs to check]
$ python test_parser.py nyt.NYTParser <one of those URLs>
[text of article to store]
"""
import sys
try:
parsername = sys.argv[1]
except IndexError:
print 'Usage: test_parser.py <modulename>.<classname> [<url_to_check>]'
sys.exit()
try:
url = sys.argv[2]
except IndexError:
url = None
module, classname = parsername.rsplit('.', 1)
parser = getattr(__import__(module, globals(), fromlist=[classname]), classname)
if url:
parsed_article = parser(url)
print unicode(parsed_article)
else:
links = parser.feed_urls()
print '\n'.join(links)
|
Tools/python37/Lib/urllib/robotparser.py | xxroot/android_universal | 207 | 11130051 | """ robotparser.py
Copyright (C) 2000 <NAME>
You can choose between two licenses when using this package:
1) GNU GPLv2
2) PSF license for Python 2.2
The robots.txt Exclusion Protocol is implemented as specified in
http://www.robotstxt.org/norobots-rfc.txt
"""
import collections
import urllib.parse
import urllib.request
__all__ = ["RobotFileParser"]
RequestRate = collections.namedtuple("RequestRate", "requests seconds")
class RobotFileParser:
""" This class provides a set of methods to read, parse and answer
questions about a single robots.txt file.
"""
def __init__(self, url=''):
self.entries = []
self.default_entry = None
self.disallow_all = False
self.allow_all = False
self.set_url(url)
self.last_checked = 0
def mtime(self):
"""Returns the time the robots.txt file was last fetched.
This is useful for long-running web spiders that need to
check for new robots.txt files periodically.
"""
return self.last_checked
def modified(self):
"""Sets the time the robots.txt file was last fetched to the
current time.
"""
import time
self.last_checked = time.time()
def set_url(self, url):
"""Sets the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urllib.parse.urlparse(url)[1:3]
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
try:
f = urllib.request.urlopen(self.url)
except urllib.error.HTTPError as err:
if err.code in (401, 403):
self.disallow_all = True
elif err.code >= 400 and err.code < 500:
self.allow_all = True
else:
raw = f.read()
self.parse(raw.decode("utf-8").splitlines())
def _add_entry(self, entry):
if "*" in entry.useragents:
# the default entry is considered last
if self.default_entry is None:
# the first default entry wins
self.default_entry = entry
else:
self.entries.append(entry)
def parse(self, lines):
"""Parse the input lines from a robots.txt file.
We allow that a user-agent: line is not preceded by
one or more blank lines.
"""
# states:
# 0: start state
# 1: saw user-agent line
# 2: saw an allow or disallow line
state = 0
entry = Entry()
self.modified()
for line in lines:
if not line:
if state == 1:
entry = Entry()
state = 0
elif state == 2:
self._add_entry(entry)
entry = Entry()
state = 0
# remove optional comment and strip line
i = line.find('#')
if i >= 0:
line = line[:i]
line = line.strip()
if not line:
continue
line = line.split(':', 1)
if len(line) == 2:
line[0] = line[0].strip().lower()
line[1] = urllib.parse.unquote(line[1].strip())
if line[0] == "user-agent":
if state == 2:
self._add_entry(entry)
entry = Entry()
entry.useragents.append(line[1])
state = 1
elif line[0] == "disallow":
if state != 0:
entry.rulelines.append(RuleLine(line[1], False))
state = 2
elif line[0] == "allow":
if state != 0:
entry.rulelines.append(RuleLine(line[1], True))
state = 2
elif line[0] == "crawl-delay":
if state != 0:
# before trying to convert to int we need to make
# sure that robots.txt has valid syntax otherwise
# it will crash
if line[1].strip().isdigit():
entry.delay = int(line[1])
state = 2
elif line[0] == "request-rate":
if state != 0:
numbers = line[1].split('/')
# check if all values are sane
if (len(numbers) == 2 and numbers[0].strip().isdigit()
and numbers[1].strip().isdigit()):
entry.req_rate = RequestRate(int(numbers[0]), int(numbers[1]))
state = 2
if state == 2:
self._add_entry(entry)
def can_fetch(self, useragent, url):
"""using the parsed robots.txt decide if useragent can fetch url"""
if self.disallow_all:
return False
if self.allow_all:
return True
# Until the robots.txt file has been read or found not
# to exist, we must assume that no url is allowable.
# This prevents false positives when a user erroneously
# calls can_fetch() before calling read().
if not self.last_checked:
return False
# search for given user agent matches
# the first match counts
parsed_url = urllib.parse.urlparse(urllib.parse.unquote(url))
url = urllib.parse.urlunparse(('','',parsed_url.path,
parsed_url.params,parsed_url.query, parsed_url.fragment))
url = urllib.parse.quote(url)
if not url:
url = "/"
for entry in self.entries:
if entry.applies_to(useragent):
return entry.allowance(url)
# try the default entry last
if self.default_entry:
return self.default_entry.allowance(url)
# agent not found ==> access granted
return True
def crawl_delay(self, useragent):
if not self.mtime():
return None
for entry in self.entries:
if entry.applies_to(useragent):
return entry.delay
return self.default_entry.delay
def request_rate(self, useragent):
if not self.mtime():
return None
for entry in self.entries:
if entry.applies_to(useragent):
return entry.req_rate
return self.default_entry.req_rate
def __str__(self):
entries = self.entries
if self.default_entry is not None:
entries = entries + [self.default_entry]
return '\n'.join(map(str, entries)) + '\n'
class RuleLine:
"""A rule line is a single "Allow:" (allowance==True) or "Disallow:"
(allowance==False) followed by a path."""
def __init__(self, path, allowance):
if path == '' and not allowance:
# an empty value means allow all
allowance = True
path = urllib.parse.urlunparse(urllib.parse.urlparse(path))
self.path = urllib.parse.quote(path)
self.allowance = allowance
def applies_to(self, filename):
return self.path == "*" or filename.startswith(self.path)
def __str__(self):
return ("Allow" if self.allowance else "Disallow") + ": " + self.path
class Entry:
"""An entry has one or more user-agents and zero or more rulelines"""
def __init__(self):
self.useragents = []
self.rulelines = []
self.delay = None
self.req_rate = None
def __str__(self):
ret = []
for agent in self.useragents:
ret.append(f"User-agent: {agent}")
if self.delay is not None:
ret.append(f"Crawl-delay: {self.delay}")
if self.req_rate is not None:
rate = self.req_rate
ret.append(f"Request-rate: {rate.requests}/{rate.seconds}")
ret.extend(map(str, self.rulelines))
ret.append('') # for compatibility
return '\n'.join(ret)
def applies_to(self, useragent):
"""check if this entry applies to the specified agent"""
# split the name token and make it lower case
useragent = useragent.split("/")[0].lower()
for agent in self.useragents:
if agent == '*':
# we have the catch-all agent
return True
agent = agent.lower()
if agent in useragent:
return True
return False
def allowance(self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded"""
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True
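# --- Editor's note: a minimal usage sketch added for illustration; it is not
# part of the CPython module above. It feeds an in-memory robots.txt to the
# parser instead of fetching one over the network.
def _demo_robotparser():  # pragma: no cover
    rp = RobotFileParser()
    rp.parse([
        "User-agent: *",
        "Crawl-delay: 2",
        "Disallow: /private/",
        "Allow: /",
    ])
    print(rp.can_fetch("MyBot/1.0", "http://example.com/index.html"))  # True
    print(rp.can_fetch("MyBot/1.0", "http://example.com/private/x"))   # False
    print(rp.crawl_delay("MyBot/1.0"))                                 # 2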
|
tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py | PowerOlive/mindspore | 3,200 | 11130084 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops import _constants as Constants
add = P.Add()
mul = P.Mul()
fused_mul_add = Primitive('FusedMulAdd')
make_tuple = Primitive('MakeTuple')
tuple_getitem = Primitive(Constants.kTupleGetItem)
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
def test_mul_add_fusion(tag):
fns = FnDict()
@fns
def before1(x, y, z):
res = mul(x, y)
res = add(res, z)
return res
@fns
def before2(x, y, z):
res = mul(x, y)
res = add(z, res)
return res
@fns
def after(x, y, z):
res = fused_mul_add(x, y, z)
return make_tuple(res)
return fns[tag]
|
plugins/modules/oci_blockstorage_volume_group_actions.py | slmjy/oci-ansible-collection | 108 | 11130090 | <filename>plugins/modules/oci_blockstorage_volume_group_actions.py<gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_blockstorage_volume_group_actions
short_description: Perform actions on a VolumeGroup resource in Oracle Cloud Infrastructure
description:
- Perform actions on a VolumeGroup resource in Oracle Cloud Infrastructure
- For I(action=change_compartment), moves a volume group into a different compartment within the same tenancy.
For information about moving resources between compartments,
see L(Moving Resources to a Different Compartment,https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
version_added: "2.9.0"
author: Oracle (@oracle)
options:
volume_group_id:
description:
- The Oracle Cloud ID (OCID) that uniquely identifies the volume group.
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to move the volume group to.
type: str
required: true
action:
description:
- The action to perform on the VolumeGroup.
type: str
required: true
choices:
- "change_compartment"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on volume_group
oci_blockstorage_volume_group_actions:
# required
volume_group_id: "ocid1.volumegroup.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
action: change_compartment
"""
RETURN = """
volume_group:
description:
- Details of the VolumeGroup resource acted upon by the current operation
returned: on success
type: complex
contains:
availability_domain:
description:
- The availability domain of the volume group.
returned: on success
type: str
sample: Uocm:PHX-AD-1
compartment_id:
description:
- The OCID of the compartment that contains the volume group.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID for the volume group.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of a volume group.
returned: on success
type: str
sample: PROVISIONING
size_in_mbs:
description:
- The aggregate size of the volume group in MBs.
returned: on success
type: int
sample: 56
size_in_gbs:
description:
- The aggregate size of the volume group in GBs.
returned: on success
type: int
sample: 56
source_details:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- ""
returned: on success
type: str
sample: volumeGroupBackupId
volume_group_backup_id:
description:
- The OCID of the volume group backup to restore from.
returned: on success
type: str
sample: "ocid1.volumegroupbackup.oc1..xxxxxxEXAMPLExxxxxx"
volume_group_id:
description:
- The OCID of the volume group to clone from.
returned: on success
type: str
sample: "ocid1.volumegroup.oc1..xxxxxxEXAMPLExxxxxx"
volume_ids:
description:
- OCIDs for the volumes in this volume group.
returned: on success
type: list
sample: []
time_created:
description:
- The date and time the volume group was created. Format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
volume_ids:
description:
- OCIDs for the volumes in this volume group.
returned: on success
type: list
sample: []
is_hydrated:
description:
- Specifies whether the newly created cloned volume group's data has finished copying
from the source volume group or backup.
returned: on success
type: bool
sample: true
sample: {
"availability_domain": "Uocm:PHX-AD-1",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "display_name_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "PROVISIONING",
"size_in_mbs": 56,
"size_in_gbs": 56,
"source_details": {
"type": "volumeGroupBackupId",
"volume_group_backup_id": "ocid1.volumegroupbackup.oc1..xxxxxxEXAMPLExxxxxx",
"volume_group_id": "ocid1.volumegroup.oc1..xxxxxxEXAMPLExxxxxx",
"volume_ids": []
},
"time_created": "2013-10-20T19:20:30+01:00",
"volume_ids": [],
"is_hydrated": true
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.core import BlockstorageClient
from oci.core.models import ChangeVolumeGroupCompartmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class VolumeGroupActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
change_compartment
"""
@staticmethod
def get_module_resource_id_param():
return "volume_group_id"
def get_module_resource_id(self):
return self.module.params.get("volume_group_id")
def get_get_fn(self):
return self.client.get_volume_group
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_volume_group,
volume_group_id=self.module.params.get("volume_group_id"),
)
def change_compartment(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ChangeVolumeGroupCompartmentDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.change_volume_group_compartment,
call_fn_args=(),
call_fn_kwargs=dict(
volume_group_id=self.module.params.get("volume_group_id"),
change_volume_group_compartment_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
VolumeGroupActionsHelperCustom = get_custom_class("VolumeGroupActionsHelperCustom")
class ResourceHelper(VolumeGroupActionsHelperCustom, VolumeGroupActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
volume_group_id=dict(aliases=["id"], type="str", required=True),
compartment_id=dict(type="str", required=True),
action=dict(type="str", required=True, choices=["change_compartment"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="volume_group",
service_client_class=BlockstorageClient,
namespace="core",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
|
tools/kconfig_new/test/confgen/test_confgen.py | lovyan03/esp-idf | 8,747 | 11130116 | <reponame>lovyan03/esp-idf
#!/usr/bin/env python
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import unittest
from future.utils import iteritems
class ConfgenBaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
self.args = dict()
self.functions = {'in': self.assertIn,
'not in': self.assertNotIn,
'equal': self.assertEqual,
'not equal': self.assertNotEqual}
try:
regex_func = self.assertRegex
except AttributeError:
# Python 2 fallback
regex_func = self.assertRegexpMatches
finally:
self.functions['regex'] = lambda instance, s, expr: regex_func(instance, expr, s) # reverse args order
def setUp(self):
with tempfile.NamedTemporaryFile(prefix='test_confgen_', delete=False) as f:
self.output_file = f.name
self.addCleanup(os.remove, self.output_file)
def invoke_confgen(self, args):
call_args = [sys.executable, '../../confgen.py']
for (k, v) in iteritems(args):
if k != 'output':
if isinstance(v, type('')): # easy Python 2/3 compatible str/unicode
call_args += ['--{}'.format(k), v]
else:
for i in v:
call_args += ['--{}'.format(k), i]
call_args += ['--output', args['output'], self.output_file] # these arguments belong together
subprocess.check_call(call_args)
def invoke_and_test(self, in_text, out_text, test='in'):
"""
Main utility function for testing confgen:
- Runs confgen via invoke_confgen(), using output method pre-set in test class setup
- in_text is the Kconfig file input content
- out_text is some expected output from confgen
- 'test' can be any function key from self.functions dict (see above). Default is 'in' to test if
out_text is a substring of the full confgen output.
"""
with tempfile.NamedTemporaryFile(mode='w+', prefix='test_confgen_', delete=False) as f:
self.addCleanup(os.remove, f.name)
f.write(textwrap.dedent(in_text))
self.args['kconfig'] = f.name
self.invoke_confgen(self.args)
with open(self.output_file) as f_result:
result = f_result.read()
try:
out_text = textwrap.dedent(out_text)
except TypeError:
pass # probably a regex
self.functions[test](self, out_text, result)
class CmakeTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(CmakeTestCase, self).setUpClass()
self.args.update({'output': 'cmake'})
def testStringEscape(self):
self.invoke_and_test("""
config PASSWORD
string "password"
default "\\\\~!@#$%^&*()\\\""
""", 'set(CONFIG_PASSWORD "\\\\~!@#$%^&*()\\\"")')
def testHexPrefix(self):
self.invoke_and_test(HEXPREFIX_KCONFIG, 'set(CONFIG_HEX_NOPREFIX "0x33")')
self.invoke_and_test(HEXPREFIX_KCONFIG, 'set(CONFIG_HEX_PREFIX "0x77")')
class JsonTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(JsonTestCase, self).setUpClass()
self.args.update({'output': 'json'})
def testStringEscape(self):
self.invoke_and_test("""
config PASSWORD
string "password"
default "\\\\~!@#$%^&*()\\\""
""", '"PASSWORD": "\\\\~!@#$%^&*()\\\""')
def testHexPrefix(self):
# hex values come out as integers in JSON, due to no hex type
self.invoke_and_test(HEXPREFIX_KCONFIG, '"HEX_NOPREFIX": %d' % 0x33)
self.invoke_and_test(HEXPREFIX_KCONFIG, '"HEX_PREFIX": %d' % 0x77)
class JsonMenuTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(JsonMenuTestCase, self).setUpClass()
self.args.update({'output': 'json_menus'})
def testMultipleRanges(self):
self.invoke_and_test("""
config IDF_TARGET
string "IDF target"
default "esp32"
config SOME_SETTING
int "setting for the chip"
range 0 100 if IDF_TARGET="esp32s0"
range 0 10 if IDF_TARGET="esp32"
range -10 1 if IDF_TARGET="esp32s2"
""", re.compile(r'"range":\s+\[\s+0,\s+10\s+\]'), 'regex')
def testHexRanges(self):
self.invoke_and_test("""
config SOME_SETTING
hex "setting for the chip"
range 0x0 0xaf if UNDEFINED
range 0x10 0xaf
""", r'"range":\s+\[\s+16,\s+175\s+\]', 'regex')
class ConfigTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(ConfigTestCase, self).setUpClass()
self.args.update({'output': 'config'})
self.input = """
config TEST
bool "test"
default "n"
"""
def setUp(self):
super(ConfigTestCase, self).setUp()
with tempfile.NamedTemporaryFile(mode='w+', prefix='test_confgen_', delete=False) as f:
self.addCleanup(os.remove, f.name)
self.args.update({'config': f.name}) # this is input in contrast with {'output': 'config'}
f.write(textwrap.dedent("""
CONFIG_TEST=y
CONFIG_UNKNOWN=y
"""))
def testKeepSavedOption(self):
self.invoke_and_test(self.input, 'CONFIG_TEST=y')
def testDiscardUnknownOption(self):
self.invoke_and_test(self.input, 'CONFIG_UNKNOWN', 'not in')
class MakefileTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(MakefileTestCase, self).setUpClass()
self.args.update({'output': 'makefile'})
def setUp(self):
super(MakefileTestCase, self).setUp()
with tempfile.NamedTemporaryFile(mode='w+', prefix='test_confgen_', delete=False) as f1:
self.addCleanup(os.remove, f1.name)
with tempfile.NamedTemporaryFile(mode='w+', prefix='test_confgen_', delete=False) as f2:
self.addCleanup(os.remove, f2.name)
self.args.update({'env': ['COMPONENT_KCONFIGS_PROJBUILD_SOURCE_FILE={}'.format(f1.name),
'COMPONENT_KCONFIGS_SOURCE_FILE={}'.format(f2.name),
'IDF_TARGET=esp32']})
def testTarget(self):
with open(os.path.join(os.environ['IDF_PATH'], 'Kconfig')) as f:
self.invoke_and_test(f.read(), 'CONFIG_IDF_TARGET="esp32"')
def testHexPrefix(self):
self.invoke_and_test(HEXPREFIX_KCONFIG, 'CONFIG_HEX_NOPREFIX=0x33')
self.invoke_and_test(HEXPREFIX_KCONFIG, 'CONFIG_HEX_PREFIX=0x77')
class HeaderTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(HeaderTestCase, self).setUpClass()
self.args.update({'output': 'header'})
def testStringEscape(self):
self.invoke_and_test("""
config PASSWORD
string "password"
default "\\\\~!@#$%^&*()\\\""
""", '#define CONFIG_PASSWORD "\\\\~!@#$%^&*()\\\""')
def testHexPrefix(self):
self.invoke_and_test(HEXPREFIX_KCONFIG, '#define CONFIG_HEX_NOPREFIX 0x33')
self.invoke_and_test(HEXPREFIX_KCONFIG, '#define CONFIG_HEX_PREFIX 0x77')
class DocsTestCase(ConfgenBaseTestCase):
@classmethod
def setUpClass(self):
super(DocsTestCase, self).setUpClass()
self.args.update({'output': 'docs',
'env': 'IDF_TARGET=esp32'})
def testChoice(self):
self.invoke_and_test("""
menu "TEST"
choice TYPES
prompt "types"
default TYPES_OP2
help
Description of TYPES
config TYPES_OP1
bool "option 1"
config TYPES_OP2
bool "option 2"
endchoice
endmenu
""", """
TEST
----
Contains:
- :ref:`CONFIG_TYPES`
.. _CONFIG_TYPES:
CONFIG_TYPES
^^^^^^^^^^^^
types
:emphasis:`Found in:` :ref:`test`
Description of TYPES
Available options:
- option 1 (TYPES_OP1)
- option 2 (TYPES_OP2)
""") # this is more readable than regex
# Used by multiple testHexPrefix() test cases to verify correct hex output for each format
HEXPREFIX_KCONFIG = """
config HEX_NOPREFIX
hex "Hex Item default no prefix"
default 33
config HEX_PREFIX
hex "Hex Item default prefix"
default 0x77
"""
if __name__ == '__main__':
unittest.main()
|
examples/packing_on_the_sphere.py | vishalbelsare/pymanopt | 459 | 11130135 | <reponame>vishalbelsare/pymanopt
import autograd.numpy as np
import tensorflow as tf
import theano.tensor as T
import torch
from examples._tools import ExampleRunner
import pymanopt
from pymanopt.manifolds import Elliptope
from pymanopt.solvers import ConjugateGradient
SUPPORTED_BACKENDS = (
"Autograd", "PyTorch", "TensorFlow", "Theano"
)
def create_cost(backend, dimension, num_points, epsilon):
if backend == "Autograd":
@pymanopt.function.Autograd
def cost(X):
Y = X @ X.T
# Shift the exponentials by the maximum value to reduce numerical
# trouble due to possible overflows.
s = np.triu(Y, 1).max()
expY = np.exp((Y - s) / epsilon)
# Zero out the diagonal
expY -= np.diag(np.diag(expY))
u = np.triu(expY, 1).sum()
return s + epsilon * np.log(u)
elif backend == "PyTorch":
@pymanopt.function.PyTorch
def cost(X):
Y = torch.matmul(X, torch.transpose(X, 1, 0))
s = torch.triu(Y, 1).max()
expY = torch.exp((Y - s) / epsilon)
expY = expY - torch.diag(torch.diag(expY))
u = torch.triu(expY, 1).sum()
return s + epsilon * torch.log(u)
elif backend == "TensorFlow":
X = tf.Variable(tf.zeros((num_points, dimension), dtype=np.float64),
name="X")
@pymanopt.function.TensorFlow
def cost(X):
Y = tf.matmul(X, tf.transpose(X))
s = tf.reduce_max(tf.linalg.band_part(Y, 0, -1))
expY = tf.exp((Y - s) / epsilon)
expY = expY - tf.linalg.diag(tf.linalg.diag_part(expY))
u = tf.reduce_sum(tf.linalg.band_part(expY, 0, -1))
return s + epsilon * tf.math.log(u)
elif backend == "Theano":
X = T.matrix()
@pymanopt.function.Theano(X)
def cost(X):
Y = T.dot(X, X.T)
s = T.triu(Y, 1).max()
expY = T.exp((Y - s) / epsilon)
expY = expY - T.diag(T.diag(expY))
u = T.sum(T.triu(expY, 1))
return s + epsilon * T.log(u)
else:
raise ValueError("Unsupported backend '{:s}'".format(backend))
return cost
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
dimension = 3 # Dimension of the embedding space, i.e. R^k
num_points = 24 # Points on the sphere
# This value should be as close to 0 as affordable. If it is too close to
zero, optimization first becomes much slower, then simply doesn't work
# anymore because of floating point overflow errors (NaN's and Inf's start
# to appear). If it is too large, then log-sum-exp is a poor approximation
# of the max function, and the spread will be less uniform. An okay value
# seems to be 0.01 or 0.001 for example. Note that a better strategy than
# using a small epsilon straightaway is to reduce epsilon bit by bit and to
    # warm-start subsequent optimizations in that way. The TrustRegions solver
    # is more appropriate for such fine tuning.
epsilon = 0.005
cost = create_cost(backend, dimension, num_points, epsilon)
manifold = Elliptope(num_points, dimension)
problem = pymanopt.Problem(manifold, cost)
if quiet:
problem.verbosity = 0
solver = ConjugateGradient(mingradnorm=1e-8, maxiter=1e5)
Yopt = solver.solve(problem)
if quiet:
return
Xopt = Yopt @ Yopt.T
maxdot = np.triu(Xopt, 1).max()
print("Maximum angle between any two points:", maxdot)
if __name__ == "__main__":
runner = ExampleRunner(run, "Packing on the sphere", SUPPORTED_BACKENDS)
runner.run()
|
alipay/aop/api/domain/AlipayBossContractManagementCancelModel.py | antopen/alipay-sdk-python-all | 213 | 11130139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayBossContractManagementCancelModel(object):
def __init__(self):
self._biz_source = None
self._contract_batch_no = None
@property
def biz_source(self):
return self._biz_source
@biz_source.setter
def biz_source(self, value):
self._biz_source = value
@property
def contract_batch_no(self):
return self._contract_batch_no
@contract_batch_no.setter
def contract_batch_no(self, value):
self._contract_batch_no = value
def to_alipay_dict(self):
params = dict()
if self.biz_source:
if hasattr(self.biz_source, 'to_alipay_dict'):
params['biz_source'] = self.biz_source.to_alipay_dict()
else:
params['biz_source'] = self.biz_source
if self.contract_batch_no:
if hasattr(self.contract_batch_no, 'to_alipay_dict'):
params['contract_batch_no'] = self.contract_batch_no.to_alipay_dict()
else:
params['contract_batch_no'] = self.contract_batch_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBossContractManagementCancelModel()
if 'biz_source' in d:
o.biz_source = d['biz_source']
if 'contract_batch_no' in d:
o.contract_batch_no = d['contract_batch_no']
return o
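# Illustrative round-trip sketch; it is not part of the generated SDK file and
# only exercises the accessors and converters defined above. The sample values
# are hypothetical.
if __name__ == '__main__':
    model = AlipayBossContractManagementCancelModel()
    model.biz_source = 'BOSS'
    model.contract_batch_no = '2021-0001'
    as_dict = model.to_alipay_dict()
    restored = AlipayBossContractManagementCancelModel.from_alipay_dict(as_dict)
    assert restored.contract_batch_no == model.contract_batch_no
    print(json.dumps(as_dict))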
|
inference_shape_human.py | favitor/im2avatar | 131 | 11130154 | <filename>inference_shape_human.py<gh_stars>100-1000
import tensorflow as tf
import numpy as np
import os
import h5py
import sys
sys.path.append('./utils')
sys.path.append('./models')
import dataset_human as dataset
import model_shape as model
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './train_shape_human',
"""Directory where to write summaries and checkpoint.""")
tf.app.flags.DEFINE_string('base_dir', './data/human_im2avatar',
"""The path containing all the samples.""")
tf.app.flags.DEFINE_string('data_list_path', './data_list',
"""The path containing data lists.""")
tf.app.flags.DEFINE_string('output_dir', './output_shape_human',
"""Directory to save generated volume.""")
TRAIN_DIR = FLAGS.train_dir
OUTPUT_DIR = FLAGS.output_dir
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
BATCH_SIZE = 12
IM_DIM = 128
VOL_DIM = 64
def inference(dataset_):
is_train_pl = tf.placeholder(tf.bool)
img_pl, _, = model.placeholder_inputs(BATCH_SIZE, IM_DIM, VOL_DIM)
pred = model.get_model(img_pl, is_train_pl)
pred = tf.sigmoid(pred)
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
model_path = os.path.join(TRAIN_DIR, "trained_models")
ckpt = tf.train.get_checkpoint_state(model_path)
restorer = tf.train.Saver()
restorer.restore(sess, ckpt.model_checkpoint_path)
test_samples = dataset_.getTestSampleSize()
for batch_idx in range(test_samples):
imgs, view_names = dataset_.next_test_batch(batch_idx, 1)
feed_dict = {img_pl: imgs, is_train_pl: False}
pred_res = sess.run(pred, feed_dict=feed_dict)
for i in range(len(view_names)):
vol_ = pred_res[i]
cloth = view_names[i][0]
mesh = view_names[i][1]
name_ = view_names[i][2][:-4]
save_path = os.path.join(OUTPUT_DIR, cloth, mesh)
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path_name = os.path.join(save_path, name_+".h5")
if os.path.exists(save_path_name):
os.remove(save_path_name)
        h5_fout = h5py.File(save_path_name, 'w')
h5_fout.create_dataset(
'data', data=vol_,
compression='gzip', compression_opts=4,
dtype='float32')
h5_fout.close()
      print(batch_idx, save_path_name)
def main():
test_dataset = dataset.Dataset(base_path=FLAGS.base_dir,
data_list_path=FLAGS.data_list_path)
inference(test_dataset)
if __name__ == '__main__':
main()
|
tests/numpy/rot90_test.py | Walon1998/dace | 227 | 11130157 | <reponame>Walon1998/dace<gh_stars>100-1000
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import numpy as np
from sympy.core.numbers import comp
import dace
from common import compare_numpy_output
@compare_numpy_output()
def test_rot90_2d_k0(A: dace.int32[10, 10]):
return np.rot90(A, k=0)
@compare_numpy_output()
def test_rot90_2d_k1(A: dace.int32[10, 10]):
return np.rot90(A)
@compare_numpy_output()
def test_rot90_2d_k2(A: dace.int32[10, 10]):
return np.rot90(A, k=2)
@compare_numpy_output()
def test_rot90_2d_k3(A: dace.int32[10, 10]):
return np.rot90(A, k=3)
if __name__ == '__main__':
test_rot90_2d_k0()
test_rot90_2d_k1()
test_rot90_2d_k2()
test_rot90_2d_k3()
|
websauna/system/user/interfaces.py | highPriestLOL/websauna | 286 | 11130196 | <filename>websauna/system/user/interfaces.py
"""Define various interfaces telling how user subsystem objects interact and can be looked up from registry."""
# Pyramid
import zope
from pyramid.interfaces import IRequest
from pyramid.interfaces import IResponse
from zope.interface import Interface
import authomatic
class IUser(Interface):
"""User.
Usually SQLAlchemy model instance of :py:class:`websauna.system.user.usermixin.UserMixin`.
    Hard requirements for the User interface are listed here - this is what the Websauna default frontend expects from a user instance.
:py:class:`websauna.system.user.interfaces.ILoginService` must know some user implementation details.
"""
    #: How we present the user's name to the user themselves. Picks the first of 1) full name if set, 2) username if set, 3) email.
friendly_name = zope.interface.Attribute("friendly_name")
class IGroup(Interface):
"""User group.
Usually SQLAlchemy model instance of :py:class:`websauna.system.user.usermixin.GroupMixin` but can be any object.
"""
#: Then name of the group
name = zope.interface.Attribute("name")
class IUserModel(Interface):
"""Register utility registration which marks active User SQLAlchemy model class."""
class IGroupModel(Interface):
"""Register utility registration which marks active Group SQLAlchemy model class."""
class IActivationModel(Interface):
"""Register utility registration which marks active Activation SQLAlchemy model class."""
class IAuthomatic(Interface):
"""Mark Authomatic instance in the registry."""
class ISocialLoginMapper(Interface):
"""Named marker interface to look up social login mappers."""
def capture_social_media_user(self, request: IRequest, result: authomatic.core.LoginResult) -> IUserModel:
"""Extract social media information from the Authomatic login result in order to associate the user account."""
def import_social_media_user(self, user: authomatic.core.User) -> dict:
"""Map incoming social network data to internal data structure.
        Sometimes social networks change how the data is presented over the API, and you might need to massage it into the shape you want.
The resulting dict must be JSON serializable as it is persisted as is.
:param user: Authomatic user.
:returns: Dict representation of the user.
"""
def update_first_login_social_data(self, user: object, data: dict):
"""Set the initial data on the user model.
When the user logs in from a social network for the first time (no prior logins with this email before) we fill in blanks in the user model with incoming data.
Default action is not to set any items.
:param user: User object.
:param data: Normalized data.
"""
class ISiteCreator(Interface):
"""Utility that is responsible to create the initial site."""
class AuthenticationFailure(Exception):
"""The user is not allowed to log in."""
class ILoginService(Interface):
"""A service that is responsible for handling normal website facing log in actions.
This service is responsible to
* Set up logged in session
* Do post login actions like redirects
Use :py:func:`websauna.system.user.utils.get_login_service` to get access to configured login service.
"""
def authentication_user(user: IUser, login_source: str, location: str = None) -> IResponse:
"""Make the current session logged in session for this particular user.
        A password check is not performed; however, the user must still be active (not disabled).
:param location: Override the redirect page. If none use ``websauna.login_redirect``. TODO - to be changed.
:param login_source: Application specific string telling where the login come from. E.g. "social_media", "signup", "login_form".
:raise AuthenticationFailure: If the user is disabled
"""
def authenticate_credentials(username: str, login_source: str, password: str, location: str = None) -> IResponse:
"""Logs in the user.
This is called after the user credentials have been validated.
Sets the auth cookies and redirects to a post login page, which defaults to a view named 'index'.
        Fills in the user's last login time and IP data.
        :param username: Username or email address of the user to log in.
        :param password: Plain-text password to check against the stored credentials. The default login service is designed to work with UserMixin compatible user classes.
:param location: Override the redirect page. If none use ``websauna.login_redirect``. TODO - to be changed.
:param login_source: Application specific string telling where the login come from. E.g. "social_media", "signup", "login_form".
:raise AuthenticationFailure: If the password does not match or user is disabled
"""
def logout(location: str = None) -> IResponse:
"""Log out user from the site.
* Terminate session
* Show logged out message
* Redirect the user to post login page
"""
class IOAuthLoginService(Interface):
"""A login service for federated authentication.
See :py:class:`websauna.system.interfaces.ILoginService`.
Use :py:func:`websauna.system.user.utils.get_oauth_login_service` to get access to configured login service.
"""
def handle_request(provider_name: str) -> IResponse:
"""Handle all requests coming to login/facebook, login/twitter etc. endpoints.
* Login form does an empty HTTP POST request to initiate OAuth process
* Federated authentication service does HTTP GET redirect when they accept OAuth authentication request
"""
class IUserRegistry(Interface):
"""Manage creation and querying of users.
Allow abstraction over the user backend - do not assume users are stored in the primary database.
TODO: Interface not described yet, see :py:class:`websauna.system.user.userregistry.DefaultEmailBasedUserRegistry`.
"""
class CannotResetPasswordException(Exception):
"""Password reset is disabled for this user e.g. due to disabled account."""
class ICredentialActivityService(Interface):
"""User password and activation related activities.
TODO: Interface not described yet, see :py:class:`websauna.system.user.credentialactivityservice.DefaultCredentialActivityService`.
"""
class IRegistrationService(Interface):
"""Sign up form service.
TODO: Interface not described yet, see :py:class:`websauna.system.user.registrationservice.DefaultRegistrationService`.
"""
class ILoginSchema(Interface):
"""Colander schema used for sign in form.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class ILoginForm(Interface):
"""Deform form used for sign in form.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IRegisterSchema(Interface):
"""Colander schema used for sign upform.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IRegisterForm(Interface):
"""Deform form used for sign upform.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IForgotPasswordForm(Interface):
"""Deform form used for Forgot password form.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IForgotPasswordSchema(Interface):
"""Colander schema used for Forgot password form.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IResetPasswordForm(Interface):
"""Deform form used for Reset password form.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IResetPasswordSchema(Interface):
"""Colander schema used for Reset password form.
See :py:meth:`websauna.system.Initializer.configure_user_forms`.
"""
class IPasswordHasher(Interface):
"""A utility for hashing passwords.
Used by :py:meth:`websauna.system.models.usermixin.UserMixin._set_password`.
"""
def hash_password(plain_text: str) -> str:
"""Generate a hash presentation for plain text password.
This is to be stored in database.
:return: A hasher internal string format. Usually contains number of cycles, hashed password and salt string.
"""
def verify_password(hashed_password: str, plain_text: str) -> bool:
"""Verify a password.
        Compares whether the given plain-text password matches the one stored in the database.
:return: True if the password matches, False otherwise.
"""
|
pylibui/controls/form.py | superzazu/pylibui | 222 | 11130198 | <reponame>superzazu/pylibui<filename>pylibui/controls/form.py<gh_stars>100-1000
"""
Python wrapper for libui.
"""
from pylibui import libui
from .control import Control
class Form(Control):
def __init__(self):
"""
Creates a new empty form.
"""
super().__init__()
self.control = libui.uiNewForm()
self.controls = []
def append(self, label, control, stretchy=False):
"""
Appends a control to the form.
:param label: str
:param control: control
:param stretchy: bool
:return: None
"""
libui.uiFormAppend(self.control, label, control.pointer(),
int(stretchy))
self.controls.append(control)
def delete(self, index):
"""
Deletes a child from a form.
:param index: int
:return: None
"""
libui.uiFormDelete(self.control, index)
self.controls[index].destroy()
del self.controls[index]
def getPadded(self):
"""
Returns whether the form is padded.
:return: bool
"""
return bool(libui.uiFormPadded(self.control))
def setPadded(self, padded):
"""
Sets whether the form is padded.
:param padded: bool
:return: None
"""
libui.uiFormSetPadded(self.control, int(padded))
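# Illustrative usage sketch, not part of the wrapper itself. The `App`,
# `Window` and `Entry` names below are assumptions about the rest of the
# pylibui API and may differ; only the Form methods defined above are certain.
def _form_usage_sketch():
    from pylibui.core import App                # assumed app/event-loop class
    from pylibui.controls import Window, Entry  # assumed control classes
    app = App()
    window = Window('Form demo', 320, 240)      # assumed constructor signature
    form = Form()
    form.setPadded(True)
    form.append('Name', Entry())                # label + control, non-stretchy
    window.setChild(form)                       # assumed method
    window.show()
    app.start()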
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorboard/data_compat.py | Lube-Project/ProgettoLube | 353 | 11130271 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to migrate legacy protos to their modern equivalents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tensor_util
def migrate_event(event):
if not event.HasField("summary"):
return event
old_values = event.summary.value
new_values = [migrate_value(value) for value in old_values]
# Optimization: Don't create a new event if there were no changes.
if len(old_values) == len(new_values) and all(
x is y for (x, y) in zip(old_values, new_values)
):
return event
result = event_pb2.Event()
result.CopyFrom(event)
del result.summary.value[:]
result.summary.value.extend(new_values)
return result
def migrate_value(value):
"""Convert `value` to a new-style value, if necessary and possible.
An "old-style" value is a value that uses any `value` field other than
the `tensor` field. A "new-style" value is a value that uses the
`tensor` field. TensorBoard continues to support old-style values on
disk; this method converts them to new-style values so that further
code need only deal with one data format.
Arguments:
value: A `Summary.Value` object. This argument is not modified.
Returns:
If the `value` is an old-style value for which there is a new-style
equivalent, the result is the new-style value. Otherwise---if the
value is already new-style or does not yet have a new-style
equivalent---the value will be returned unchanged.
:type value: Summary.Value
:rtype: Summary.Value
"""
handler = {
"histo": _migrate_histogram_value,
"image": _migrate_image_value,
"audio": _migrate_audio_value,
"simple_value": _migrate_scalar_value,
}.get(value.WhichOneof("value"))
return handler(value) if handler else value
def make_summary(tag, metadata, data):
tensor_proto = tensor_util.make_tensor_proto(data)
return summary_pb2.Summary.Value(
tag=tag, metadata=metadata, tensor=tensor_proto
)
def _migrate_histogram_value(value):
histogram_value = value.histo
bucket_lefts = [histogram_value.min] + histogram_value.bucket_limit[:-1]
bucket_rights = histogram_value.bucket_limit[:-1] + [histogram_value.max]
bucket_counts = histogram_value.bucket
buckets = np.array(
[bucket_lefts, bucket_rights, bucket_counts], dtype=np.float32
).transpose()
summary_metadata = histogram_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
)
return make_summary(value.tag, summary_metadata, buckets)
def _migrate_image_value(value):
image_value = value.image
data = [
str(image_value.width).encode("ascii"),
str(image_value.height).encode("ascii"),
image_value.encoded_image_string,
]
summary_metadata = image_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
)
return make_summary(value.tag, summary_metadata, data)
def _migrate_audio_value(value):
audio_value = value.audio
data = [[audio_value.encoded_audio_string, b""]] # empty label
summary_metadata = audio_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
encoding=audio_metadata.Encoding.Value("WAV"),
)
return make_summary(value.tag, summary_metadata, data)
def _migrate_scalar_value(value):
scalar_value = value.simple_value
summary_metadata = scalar_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
)
return make_summary(value.tag, summary_metadata, scalar_value)
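# Illustrative sketch, not part of the TensorBoard module: build an old-style
# scalar event and run it through `migrate_event`, which moves the
# `simple_value` into a new-style `tensor` summary.
def _migrate_event_demo():
    old_event = event_pb2.Event(
        step=1,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(tag="loss", simple_value=0.25)]
        ),
    )
    new_event = migrate_event(old_event)
    # The migrated value now carries the scalar in its `tensor` field.
    return new_event.summary.value[0].tensor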
|
resources/recruit_database.py | litchiar/ArknightsAutoHelper | 1,035 | 11130276 | recruit_database = [
('Lancet-2', 0, ['医疗干员', '远程位', '治疗', '支援机械']),
('Castle-3', 0, ['近卫干员', '近战位', '支援', '支援机械']),
('夜刀', 1, ['先锋干员', '近战位', '新手']),
('黑角', 1, ['重装干员', '近战位', '新手']),
('巡林者', 1, ['狙击干员', '远程位', '新手']),
('杜林', 1, ['术师干员', '远程位', '新手']),
('12F', 1, ['术师干员', '远程位', '新手']),
('芬', 2, ['先锋干员', '近战位', '费用回复']),
('香草', 2, ['先锋干员', '近战位', '费用回复']),
('翎羽', 2, ['先锋干员', '近战位', '输出', '费用回复']),
('玫兰莎', 2, ['近卫干员', '近战位', '输出', '生存']),
('米格鲁', 2, ['重装干员', '近战位', '防护']),
('克洛丝', 2, ['狙击干员', '远程位', '输出']),
('安德切尔', 2, ['狙击干员', '远程位', '输出']),
('炎熔', 2, ['术师干员', '远程位', '群攻']),
('芙蓉', 2, ['医疗干员', '远程位', '治疗']),
('安赛尔', 2, ['医疗干员', '远程位', '治疗']),
('史都华德', 2, ['术师干员', '远程位', '输出']),
('梓兰', 2, ['辅助干员', '远程位', '减速']),
('夜烟', 3, ['术师干员', '远程位', '输出', '削弱']),
('远山', 3, ['术师干员', '远程位', '群攻']),
('杰西卡', 3, ['狙击干员', '远程位', '输出', '生存']),
('流星', 3, ['狙击干员', '远程位', '输出', '削弱']),
('白雪', 3, ['狙击干员', '远程位', '群攻', '减速']),
('清道夫', 3, ['先锋干员', '近战位', '费用回复', '输出']),
('红豆', 3, ['先锋干员', '近战位', '输出', '费用回复']),
('杜宾', 3, ['近卫干员', '近战位', '输出', '支援']),
('缠丸', 3, ['近卫干员', '近战位', '生存', '输出']),
('霜叶', 3, ['近卫干员', '近战位', '减速', '输出']),
('艾丝黛尔', 3, ['近卫干员', '近战位', '群攻', '生存']),
('慕斯', 3, ['近卫干员', '近战位', '输出']),
('砾', 3, ['特种干员', '近战位', '快速复活', '防护']),
('暗索', 3, ['特种干员', '近战位', '位移']),
('末药', 3, ['医疗干员', '远程位', '治疗']),
('嘉维尔', 3, ['医疗干员', '远程位', '治疗']),
('调香师', 3, ['医疗干员', '远程位', '治疗']),
('角峰', 3, ['重装干员', '近战位', '防护']),
('蛇屠箱', 3, ['重装干员', '近战位', '防护']),
('古米', 3, ['重装干员', '近战位', '防护', '治疗']),
('地灵', 3, ['辅助干员', '远程位', '减速']),
('阿消', 3, ['特种干员', '近战位', '位移']),
('白面鸮', 4, ['医疗干员', '远程位', '治疗', '支援']),
('凛冬', 4, ['先锋干员', '近战位', '费用回复', '支援']),
('德克萨斯', 4, ['先锋干员', '近战位', '费用回复', '控场']),
('因陀罗', 4, ['近卫干员', '近战位', '输出', '生存']),
('幽灵鲨', 4, ['近卫干员', '近战位', '群攻', '生存']),
('蓝毒', 4, ['狙击干员', '远程位', '输出']),
('白金', 4, ['狙击干员', '远程位', '输出']),
('陨星', 4, ['狙击干员', '远程位', '群攻', '削弱']),
('梅尔', 4, ['辅助干员', '远程位', '召唤', '控场']),
('赫默', 4, ['医疗干员', '远程位', '治疗']),
('华法琳', 4, ['医疗干员', '远程位', '治疗', '支援']),
('临光', 4, ['重装干员', '近战位', '防护', '治疗']),
('红', 4, ['特种干员', '近战位', '快速复活', '控场']),
('雷蛇', 4, ['重装干员', '近战位', '防护', '输出']),
('可颂', 4, ['重装干员', '近战位', '防护', '位移']),
('火神', 4, ['重装干员', '近战位', '生存', '防护', '输出']),
('普罗旺斯', 4, ['狙击干员', '远程位', '输出']),
('守林人', 4, ['狙击干员', '远程位', '输出', '爆发']),
('崖心', 4, ['特种干员', '近战位', '位移', '输出']),
('初雪', 4, ['辅助干员', '远程位', '削弱']),
('真理', 4, ['辅助干员', '远程位', '减速', '输出']),
('狮蝎', 4, ['特种干员', '近战位', '输出', '生存']),
('食铁兽', 4, ['特种干员', '近战位', '位移', '减速']),
('能天使', 5, ['狙击干员', '远程位', '输出']),
('推进之王', 5, ['先锋干员', '近战位', '费用回复', '输出']),
('伊芙利特', 5, ['术师干员', '远程位', '群攻', '削弱']),
('闪灵', 5, ['医疗干员', '远程位', '治疗', '支援']),
('夜莺', 5, ['医疗干员', '远程位', '治疗', '支援']),
('星熊', 5, ['重装干员', '近战位', '防护', '输出']),
('塞雷娅', 5, ['重装干员', '近战位', '防护', '治疗', '支援']),
('银灰', 5, ['近卫干员', '近战位', '输出', '支援']),
('空爆', 2, ['狙击干员', '远程位', '群攻']),
('月见夜', 2, ['近卫干员', '近战位', '输出']),
('猎蜂', 3, ['近卫干员', '近战位', '输出']),
('夜魔', 4, ['术师干员', '远程位', '输出', '治疗', '减速']),
('斯卡蒂', 5, ['近卫干员', '近战位', '输出', '生存']),
('陈', 5, ['近卫干员', '近战位', '输出', '爆发']),
('诗怀雅', 4, ['近卫干员', '近战位', '输出', '支援']),
('格雷伊', 3, ['术师干员', '远程位', '群攻', '减速']),
('泡普卡', 2, ['近卫干员', '近战位', '群攻', '生存']),
('斑点', 2, ['重装干员', '近战位', '防护', '治疗']),
('THRM-EX', 0, ['特种干员', '近战位', '爆发', '支援机械']),
('黑', 5, ['狙击干员', '远程位', '输出']),
('赫拉格', 5, ['近卫干员', '近战位', '输出', '生存']),
('格劳克斯', 4, ['辅助干员', '远程位', '减速', '控场']),
('星极', 4, ['近卫干员', '近战位', '输出', '防护']),
('苏苏洛', 3, ['医疗干员', '远程位', '治疗']),
('桃金娘', 3, ['先锋干员', '近战位', '费用回复', '治疗']),
('麦哲伦', 5, ['辅助干员', '远程位', '支援', '减速', '输出']),
('送葬人', 4, ['狙击干员', '远程位', '群攻']),
('红云', 3, ['狙击干员', '远程位', '输出']),
('莫斯提马', 5, ['术师干员', '远程位', '群攻', '支援', '控场']),
('槐琥', 4, ['特种干员', '近战位', '快速复活', '削弱']),
('清流', 3, ['医疗干员', '远程位', '治疗', '支援']),
('梅', 3, ['狙击干员', '远程位', '输出', '减速']),
('煌', 5, ['近卫干员', '近战位', '输出', '生存']),
('灰喉', 4, ['狙击干员', '远程位', '输出']),
('苇草', 4, ['先锋干员', '近战位', '费用回复', '输出']),
('布洛卡', 4, ['近卫干员', '近战位', '群攻', '生存']),
('安比尔', 3, ['狙击干员', '远程位', '输出', '减速']),
]
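# Illustrative helper, not part of the original data file: given a set of
# chosen recruitment tags (the in-game Chinese labels used above), list the
# operators whose tag list contains all of them.
def find_operators_with_tags(chosen_tags):
    chosen = set(chosen_tags)
    return [name for name, rarity, tags in recruit_database if chosen.issubset(tags)]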
|
tests/test_jsonnet.py | laserb/kapitan | 1,413 | 11130278 | #!/usr/bin/env python3
# Copyright 2019 The Kapitan Authors
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <<EMAIL>>
#
# SPDX-License-Identifier: Apache-2.0
"jsonnet tests"
import json
import os
import unittest
from kapitan.resources import (
file_exists,
dir_files_list,
dir_files_read,
gzip_b64,
jsonschema_validate,
yaml_dump,
yaml_dump_stream,
yaml_load,
yaml_load_stream,
)
from kapitan.utils import prune_empty, sha256_string
class JsonnetNativeFuncsTest(unittest.TestCase):
def test_yaml_dump(self):
"""dump json string to yaml"""
yaml = yaml_dump('{"key":"value"}')
self.assertEqual(yaml, "key: value\n")
def test_yaml_dump_stream(self):
"""dump json string to yaml"""
yaml = yaml_dump_stream('[{"key":"value"},{"key":"value"}]')
self.assertEqual(yaml, "key: value\n---\nkey: value\n")
def test_file_exists(self):
"""test that file_exists finds this test file"""
search_paths = [os.getcwd(), "./tests/"]
result = file_exists(search_paths, "test_jsonnet.py")
expected = {"exists": True, "path": "./tests/test_jsonnet.py"}
self.assertEqual(result, expected)
def test_dir_files_list(self):
"""test if list of files in a dir"""
search_paths = [os.getcwd(), "./tests/"]
result = dir_files_list(search_paths, "test_jsonnet")
expected = ["file1.txt", "file2.txt"]
self.assertEqual(result.sort(), expected.sort())
with self.assertRaises(IOError):
dir_files_list(search_paths, "non_existing_dir")
def test_dir_files_read(self):
"""must result in dict with key:
- file_name (contents of the file)"""
search_paths = [os.getcwd(), "./tests/"]
result = dir_files_read(search_paths, "test_jsonnet")
expected = {
"file1.txt": "To be, or not to be: that is the question",
"file2.txt": "Nothing will come of nothing.",
}
self.assertEqual(result, expected)
def test_yaml_load(self):
"""
This tests the yaml_load function.
It converts the yaml file in test_resources/ to a json string
"""
current_pwd = os.path.dirname(__file__)
json = yaml_load([current_pwd], "test_resources/test_yaml_load.yaml")
expected_output = """{"test": {"key": "value", "array": ["ele1", "ele2"]}}"""
self.assertEqual(json, expected_output)
def test_yaml_load_stream(self):
"""
This tests the yaml_load_stream function.
It converts the yaml file in test_resources/ to a json string
"""
current_pwd = os.path.dirname(__file__)
json = yaml_load_stream([current_pwd], "test_resources/test_yaml_load_stream.yaml")
expected_output = """[{"test1": {"key": "value", "array": ["ele1", "ele2"]}}, {"test2": {"key": "value", "array": ["ele1", "ele2"]}}]"""
self.assertEqual(json, expected_output)
def test_sha256_string(self):
"""sha256 hex digest for string"""
hash = sha256_string("test")
self.assertEqual(hash, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08")
def test_gzip_b64(self):
"""base64-encoded gzip-compression for string"""
gzip = gzip_b64("test")
self.assertEqual(gzip, "H4sIAAAAAAAC/ytJLS4BAAx+f9gEAAAA")
def test_prune_empty(self):
"""Remove empty lists and empty dictionaries from dict"""
dictionary = {"hello": "world", "array": [1, 2], "foo": {}, "bar": []}
pruned = prune_empty(dictionary)
self.assertEqual(pruned, {"hello": "world", "array": [1, 2]})
def test_jsonschema_valid(self):
"""validate valid obj with jsonschema"""
dictionary = {"msg": "hello, world!", "array": [1, 2]}
schema = {
"type": "object",
"properties": {
"msg": {"type": "string"},
"array": {"type": "array", "contains": {"type": "number"}},
},
}
validation = jsonschema_validate(json.dumps(dictionary), json.dumps(schema))
self.assertTrue(validation["valid"])
self.assertEqual(validation["reason"], "")
def test_jsonschema_invalid(self):
"""validate invalid obj with jsonschema"""
dictionary = {"msg": "hello, world!", "array": ["a", "b", "c"]}
schema = {
"type": "object",
"properties": {
"msg": {"type": "string"},
"array": {"type": "array", "contains": {"type": "number"}},
},
}
validation = jsonschema_validate(json.dumps(dictionary), json.dumps(schema))
self.assertFalse(validation["valid"])
self.assertNotEqual(validation["reason"], "")
|
monai/networks/nets/netadapter.py | dyollb/MONAI | 2,971 | 11130279 | <reponame>dyollb/MONAI
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple, Union
import torch
from monai.networks.layers import Conv, get_pool_layer
from monai.utils import deprecated_arg
class NetAdapter(torch.nn.Module):
"""
Wrapper to replace the last layer of model by convolutional layer or FC layer.
This module expects the output of `model layers[0: -2]` is a feature map with shape [B, C, spatial dims],
then replace the model's last two layers with an optional `pooling` and a `conv` or `linear` layer.
Args:
        model: a PyTorch model, supporting both 2D and 3D models. Typically, it can be a pretrained model in Torchvision,
            like: ``resnet18``, ``resnet34``, ``resnet50``, ``resnet101``, ``resnet152``, etc.
more details: https://pytorch.org/vision/stable/models.html.
num_classes: number of classes for the last classification layer. Default to 1.
dim: number of spatial dimensions, default to 2.
in_channels: number of the input channels of last layer. if None, get it from `in_features` of last layer.
use_conv: whether use convolutional layer to replace the last layer, default to False.
pool: parameters for the pooling layer, it should be a tuple, the first item is name of the pooling layer,
the second item is dictionary of the initialization args. if None, will not replace the `layers[-2]`.
default to `("avg", {"kernel_size": 7, "stride": 1})`.
bias: the bias value when replacing the last layer. if False, the layer will not learn an additive bias,
default to True.
.. deprecated:: 0.6.0
``n_classes`` is deprecated, use ``num_classes`` instead.
"""
@deprecated_arg("n_classes", since="0.6")
def __init__(
self,
model: torch.nn.Module,
num_classes: int = 1,
dim: int = 2,
in_channels: Optional[int] = None,
use_conv: bool = False,
pool: Optional[Tuple[str, Dict[str, Any]]] = ("avg", {"kernel_size": 7, "stride": 1}),
bias: bool = True,
n_classes: Optional[int] = None,
):
super().__init__()
# in case the new num_classes is default but you still call deprecated n_classes
if n_classes is not None and num_classes == 1:
num_classes = n_classes
layers = list(model.children())
orig_fc = layers[-1]
in_channels_: int
if in_channels is None:
if not hasattr(orig_fc, "in_features"):
raise ValueError("please specify the input channels of last layer with arg `in_channels`.")
in_channels_ = orig_fc.in_features # type: ignore
else:
in_channels_ = in_channels
if pool is None:
self.pool = None
# remove the last layer
self.features = torch.nn.Sequential(*layers[:-1])
else:
self.pool = get_pool_layer(name=pool, spatial_dims=dim)
# remove the last 2 layers
self.features = torch.nn.Sequential(*layers[:-2])
self.fc: Union[torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d]
if use_conv:
# add 1x1 conv (it behaves like a FC layer)
self.fc = Conv[Conv.CONV, dim](in_channels=in_channels_, out_channels=num_classes, kernel_size=1, bias=bias)
        else:
            # replace the removed FC layer with a new Linear layer; `self.features`
            # was already configured above, so reassigning it here would undo the
            # pooling-based trimming
            self.fc = torch.nn.Linear(in_features=in_channels_, out_features=num_classes, bias=bias)
self.use_conv = use_conv
def forward(self, x):
x = self.features(x)
if self.pool is not None:
x = self.pool(x)
if not self.use_conv:
x = torch.flatten(x, 1)
x = self.fc(x)
return x
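# Illustrative sketch, not part of MONAI itself: wrap a torchvision ResNet-18
# (the kind of backbone mentioned in the docstring above) so its head predicts
# two classes. Assumes torchvision is installed; with `use_conv=True` the new
# head is a 1x1 convolution, so the output keeps its spatial singleton dims.
def _netadapter_sketch():
    import torchvision
    backbone = torchvision.models.resnet18(pretrained=False)
    net = NetAdapter(backbone, num_classes=2, dim=2, use_conv=True, pool=("avg", {"kernel_size": 7, "stride": 1}))
    dummy = torch.randn(1, 3, 224, 224)
    return net(dummy).shape  # expected: torch.Size([1, 2, 1, 1])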
|
oslo/torch/utils/data/data_collators.py | lipovsek/oslo | 249 | 11130297 | from typing import List, Optional
import torch
from oslo.torch.distributed import ParallelContext, ParallelMode
class SequenceDataParallelCollator:
def __init__(
self,
parallel_keys: List[str],
parallel_context: ParallelContext,
dim: int = 1,
pad_token_id: Optional[int] = 0,
):
self.parallel_keys = parallel_keys
self.dim = dim
self.pad_token_id = pad_token_id
self.local_rank = parallel_context.get_local_rank(ParallelMode.SEQUENCE)
self.local_world_size = parallel_context.get_world_size(ParallelMode.SEQUENCE)
def __call__(self, **features):
for key in self.parallel_keys:
assert (
key in features
), f"The {key} must be in the input of `SequenceDataParallelCollator`."
value = features[key]
value_size = value.size()
seq_length = value_size[self.dim]
new_seq_length = seq_length
while new_seq_length % self.local_world_size != 0:
new_seq_length += 1
num_pads = new_seq_length - seq_length
if num_pads > 0:
pad_size = list(value_size)
pad_size[self.dim] = num_pads
pads = (
torch.ones(
pad_size,
dtype=value.dtype,
device=value.device,
)
* self.pad_token_id
)
value = torch.cat([value, pads], dim=self.dim)
value = value.chunk(
self.local_world_size,
dim=self.dim,
)[self.local_rank]
if not value.is_contiguous():
value = value.contiguous()
features[key] = value
return features
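# Illustrative sketch only (no distributed setup required): the collator above
# pads the sequence dimension up to a multiple of the sequence-parallel world
# size and keeps this rank's chunk. The standalone function below reproduces
# that arithmetic with plain tensors so the behaviour can be checked in
# isolation; it is not part of the oslo API.
def _pad_and_chunk_sketch(value: torch.Tensor, rank: int, world_size: int, dim: int = 1, pad_token_id: int = 0) -> torch.Tensor:
    seq_length = value.size(dim)
    new_seq_length = ((seq_length + world_size - 1) // world_size) * world_size
    num_pads = new_seq_length - seq_length
    if num_pads > 0:
        pad_size = list(value.size())
        pad_size[dim] = num_pads
        pads = torch.full(pad_size, pad_token_id, dtype=value.dtype, device=value.device)
        value = torch.cat([value, pads], dim=dim)
    return value.chunk(world_size, dim=dim)[rank].contiguous()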
|
coconut/compiler/matching.py | evhub/coconut | 3,624 | 11130313 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: <NAME>
License: Apache 2.0
Description: Handles Coconut pattern-matching.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
from contextlib import contextmanager
from collections import OrderedDict
from coconut.terminal import (
internal_assert,
logger,
)
from coconut.exceptions import (
CoconutInternalException,
CoconutDeferredSyntaxError,
CoconutSyntaxWarning,
)
from coconut.constants import (
match_temp_var,
wildcard,
openindent,
closeindent,
const_vars,
function_match_error_var,
match_set_name_var,
)
from coconut.compiler.util import (
paren_join,
handle_indentation,
)
# -----------------------------------------------------------------------------------------------------------------------
# UTILITIES:
# -----------------------------------------------------------------------------------------------------------------------
def get_match_names(match):
"""Gets keyword names for the given match."""
names = []
if "paren" in match:
(match,) = match
names += get_match_names(match)
elif "var" in match:
(setvar,) = match
if setvar != wildcard:
names.append(setvar)
elif "trailer" in match:
match, trailers = match[0], match[1:]
for i in range(0, len(trailers), 2):
op, arg = trailers[i], trailers[i + 1]
if op == "as":
names.append(arg)
names += get_match_names(match)
elif "as" in match:
match, name = match
names.append(name)
names += get_match_names(match)
return names
# -----------------------------------------------------------------------------------------------------------------------
# MATCHER:
# -----------------------------------------------------------------------------------------------------------------------
class Matcher(object):
"""Pattern-matching processor."""
__slots__ = (
"comp",
"original",
"loc",
"check_var",
"style",
"position",
"checkdefs",
"names",
"var_index_obj",
"name_list",
"child_groups",
"guards",
"parent_names",
)
matchers = {
"dict": lambda self: self.match_dict,
"iter": lambda self: self.match_iterator,
"series": lambda self: self.match_sequence,
"rseries": lambda self: self.match_rsequence,
"mseries": lambda self: self.match_msequence,
"string": lambda self: self.match_string,
"rstring": lambda self: self.match_rstring,
"mstring": lambda self: self.match_mstring,
"const": lambda self: self.match_const,
"var": lambda self: self.match_var,
"set": lambda self: self.match_set,
"data": lambda self: self.match_data,
"class": lambda self: self.match_class,
"data_or_class": lambda self: self.match_data_or_class,
"paren": lambda self: self.match_paren,
"trailer": lambda self: self.match_trailer,
"and": lambda self: self.match_and,
"or": lambda self: self.match_or,
"star": lambda self: self.match_star,
"implicit_tuple": lambda self: self.match_implicit_tuple,
"view": lambda self: self.match_view,
"infix": lambda self: self.match_infix,
}
valid_styles = (
"coconut",
"python",
"coconut warn",
"python warn",
"coconut strict",
"python strict",
)
def __init__(self, comp, original, loc, check_var, style="coconut", name_list=None, checkdefs=None, parent_names={}, var_index_obj=None):
"""Creates the matcher."""
self.comp = comp
self.original = original
self.loc = loc
self.check_var = check_var
internal_assert(style in self.valid_styles, "invalid Matcher style", style)
self.style = style
self.name_list = name_list
self.position = 0
self.checkdefs = []
if checkdefs is None:
self.increment()
else:
for checks, defs in checkdefs:
self.checkdefs.append((checks[:], defs[:]))
self.set_position(-1)
self.parent_names = parent_names
self.names = OrderedDict() # ensures deterministic ordering of name setting code
self.var_index_obj = [0] if var_index_obj is None else var_index_obj
self.guards = []
self.child_groups = []
def branches(self, num_branches):
"""Create num_branches child matchers, one of which must match for the parent match to succeed."""
child_group = []
for _ in range(num_branches):
new_matcher = Matcher(self.comp, self.original, self.loc, self.check_var, self.style, self.name_list, self.checkdefs, self.names, self.var_index_obj)
new_matcher.insert_check(0, "not " + self.check_var)
child_group.append(new_matcher)
self.child_groups.append(child_group)
return child_group
def get_checks(self, position=None):
"""Gets the checks at the position."""
if position is None:
position = self.position
return self.checkdefs[position][0]
def set_checks(self, checks, position=None):
"""Sets the checks at the position."""
if position is None:
position = self.position
self.checkdefs[position][0] = checks
checks = property(get_checks, set_checks)
def get_defs(self, position=None):
"""Gets the defs at the position."""
if position is None:
position = self.position
return self.checkdefs[position][1]
def set_defs(self, defs, position=None):
"""Sets the defs at the position."""
if position is None:
position = self.position
self.checkdefs[position][1] = defs
defs = property(get_defs, set_defs)
def add_check(self, check_item):
"""Adds a check universally."""
self.checks.append(check_item)
def add_def(self, def_item):
"""Adds a def universally."""
self.defs.append(def_item)
def insert_check(self, index, check_item):
"""Inserts a check universally."""
self.checks.insert(index, check_item)
def insert_def(self, index, def_item):
"""Inserts a def universally."""
self.defs.insert(index, def_item)
@property
def using_python_rules(self):
"""Whether the current style uses PEP 622 rules."""
return self.style.startswith("python")
def rule_conflict_warn(self, message, if_coconut=None, if_python=None, extra=None):
"""Warns on conflicting style rules if callback was given."""
if self.style.endswith("warn") or self.style.endswith("strict") and self.comp.strict:
full_msg = message
if if_python or if_coconut:
full_msg += " (" + (if_python if self.using_python_rules else if_coconut) + ")"
if extra:
full_msg += " (" + extra + ")"
if self.style.endswith("strict"):
full_msg += " (remove --strict to dismiss)"
logger.warn_err(self.comp.make_err(CoconutSyntaxWarning, full_msg, self.original, self.loc))
def add_guard(self, cond):
"""Adds cond as a guard."""
self.guards.append(cond)
def set_position(self, position):
"""Sets the if-statement position."""
if position < 0:
position += len(self.checkdefs)
while position >= len(self.checkdefs):
self.checkdefs.append(([], []))
self.position = position
def increment(self, by=1):
"""Advances the if-statement position."""
new_pos = self.position + by
internal_assert(new_pos > 0, "invalid increment/decrement call to set pos to", new_pos)
self.set_position(new_pos)
def decrement(self, by=1):
"""Decrements the if-statement position."""
self.increment(-by)
@contextmanager
def down_a_level(self, by=1):
"""Increment then decrement."""
self.increment(by)
try:
yield
finally:
self.decrement(by)
def get_temp_var(self):
"""Gets the next match_temp_var."""
tempvar = match_temp_var + "_" + str(self.var_index_obj[0])
self.var_index_obj[0] += 1
return tempvar
def get_set_name_var(self, name):
"""Gets the var for checking whether a name should be set."""
return match_set_name_var + "_" + name
def register_name(self, name, value):
"""Register a new name and return its name set var."""
self.names[name] = value
if self.name_list is not None and name not in self.name_list:
self.name_list.append(name)
return self.get_set_name_var(name)
def match_var(self, tokens, item, bind_wildcard=False):
"""Matches a variable."""
varname, = tokens
if varname == wildcard and not bind_wildcard:
return
if varname in self.parent_names:
self.add_check(self.parent_names[varname] + " == " + item)
elif varname in self.names:
self.add_check(self.names[varname] + " == " + item)
else:
set_name_var = self.register_name(varname, item)
self.add_def(set_name_var + " = " + item)
def match_all_in(self, matches, item):
"""Matches all matches to elements of item."""
for i, match in enumerate(matches):
self.match(match, item + "[" + str(i) + "]")
def check_len_in(self, min_len, max_len, item):
"""Checks that the length of item is in range(min_len, max_len+1)."""
if max_len is None:
if min_len:
self.add_check("_coconut.len(" + item + ") >= " + str(min_len))
elif min_len == max_len:
self.add_check("_coconut.len(" + item + ") == " + str(min_len))
elif not min_len:
self.add_check("_coconut.len(" + item + ") <= " + str(max_len))
else:
self.add_check(str(min_len) + " <= _coconut.len(" + item + ") <= " + str(max_len))
def match_function(self, args, kwargs, pos_only_match_args=(), match_args=(), star_arg=None, kwd_only_match_args=(), dubstar_arg=None):
"""Matches a pattern-matching function."""
# before everything, pop the FunctionMatchError from context
self.add_def(function_match_error_var + " = _coconut_get_function_match_error()")
with self.down_a_level():
self.match_in_args_kwargs(pos_only_match_args, match_args, args, kwargs, allow_star_args=star_arg is not None)
if star_arg is not None:
self.match(star_arg, args + "[" + str(len(match_args)) + ":]")
self.match_in_kwargs(kwd_only_match_args, kwargs)
with self.down_a_level():
if dubstar_arg is None:
self.add_check("not " + kwargs)
else:
self.match(dubstar_arg, kwargs)
def match_in_args_kwargs(self, pos_only_match_args, match_args, args, kwargs, allow_star_args=False):
"""Matches against args or kwargs."""
req_len = 0
arg_checks = {}
to_match = [] # [(move_down, match, against)]
for i, arg in enumerate(pos_only_match_args + match_args):
if isinstance(arg, tuple):
(match, default) = arg
else:
match, default = arg, None
if i < len(pos_only_match_args): # faster if arg in pos_only_match_args
names = None
else:
names = get_match_names(match)
if default is None:
if not names:
req_len = i + 1
to_match.append((False, match, args + "[" + str(i) + "]"))
else:
arg_checks[i] = (
# if i < req_len
" and ".join('"' + name + '" not in ' + kwargs for name in names),
# if i >= req_len
"_coconut.sum((_coconut.len(" + args + ") > " + str(i) + ", "
+ ", ".join('"' + name + '" in ' + kwargs for name in names)
+ ")) == 1",
)
tempvar = self.get_temp_var()
self.add_def(
tempvar + " = "
+ args + "[" + str(i) + "] if _coconut.len(" + args + ") > " + str(i) + " else "
+ "".join(
kwargs + '.pop("' + name + '") if "' + name + '" in ' + kwargs + " else "
for name in names[:-1]
)
+ kwargs + '.pop("' + names[-1] + '")',
)
to_match.append((True, match, tempvar))
else:
if not names:
tempvar = self.get_temp_var()
self.add_def(tempvar + " = " + args + "[" + str(i) + "] if _coconut.len(" + args + ") > " + str(i) + " else " + default)
to_match.append((True, match, tempvar))
else:
arg_checks[i] = (
# if i < req_len
None,
# if i >= req_len
"_coconut.sum((_coconut.len(" + args + ") > " + str(i) + ", "
+ ", ".join('"' + name + '" in ' + kwargs for name in names)
+ ")) <= 1",
)
tempvar = self.get_temp_var()
self.add_def(
tempvar + " = "
+ args + "[" + str(i) + "] if _coconut.len(" + args + ") > " + str(i) + " else "
+ "".join(
kwargs + '.pop("' + name + '") if "' + name + '" in ' + kwargs + " else "
for name in names
)
+ default,
)
to_match.append((True, match, tempvar))
max_len = None if allow_star_args else len(pos_only_match_args) + len(match_args)
self.check_len_in(req_len, max_len, args)
for i in sorted(arg_checks):
lt_check, ge_check = arg_checks[i]
if i < req_len:
if lt_check is not None:
self.add_check(lt_check)
else:
if ge_check is not None:
self.add_check(ge_check)
for move_down, match, against in to_match:
if move_down:
with self.down_a_level():
self.match(match, against)
else:
self.match(match, against)
def match_in_kwargs(self, match_args, kwargs):
"""Matches against kwargs."""
for match, default in match_args:
names = get_match_names(match)
if not names:
raise CoconutDeferredSyntaxError("keyword-only pattern-matching function arguments must be named", self.loc)
tempvar = self.get_temp_var()
self.add_def(
tempvar + " = "
+ "".join(
kwargs + '.pop("' + name + '") if "' + name + '" in ' + kwargs + " else "
for name in names
)
+ (default if default is not None else "_coconut_sentinel"),
)
with self.down_a_level():
if default is None:
self.add_check(tempvar + " is not _coconut_sentinel")
self.match(match, tempvar)
def match_dict(self, tokens, item):
"""Matches a dictionary."""
internal_assert(1 <= len(tokens) <= 2, "invalid dict match tokens", tokens)
if len(tokens) == 1:
matches, rest = tokens[0], None
else:
matches, rest = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Mapping)")
if rest is None:
self.rule_conflict_warn(
"ambiguous pattern; could be Coconut-style len-checking dict match or Python-style len-ignoring dict match",
if_coconut='resolving to Coconut-style len-checking dict match by default',
if_python='resolving to Python-style len-ignoring dict match due to Python-style "match: case" block',
extra="use explicit '{..., **_}' or '{..., **{}}' syntax to dismiss",
)
check_len = not self.using_python_rules
elif rest == "{}":
check_len = True
rest = None
else:
check_len = False
if check_len:
self.add_check("_coconut.len(" + item + ") == " + str(len(matches)))
seen_keys = set()
for k, v in matches:
if k in seen_keys:
raise CoconutDeferredSyntaxError("duplicate key {k!r} in dictionary pattern".format(k=k), self.loc)
seen_keys.add(k)
key_var = self.get_temp_var()
self.add_def(key_var + " = " + item + ".get(" + k + ", _coconut_sentinel)")
with self.down_a_level():
self.add_check(key_var + " is not _coconut_sentinel")
self.match(v, key_var)
if rest is not None and rest != wildcard:
match_keys = [k for k, v in matches]
with self.down_a_level():
self.add_def(
rest + " = dict((k, v) for k, v in "
+ item + ".items() if k not in set(("
+ ", ".join(match_keys) + ("," if len(match_keys) == 1 else "")
+ ")))",
)
def assign_to_series(self, name, series_type, item):
"""Assign name to item converted to the given series_type."""
if self.using_python_rules or series_type == "[":
self.add_def(name + " = _coconut.list(" + item + ")")
elif series_type == "(":
self.add_def(name + " = _coconut.tuple(" + item + ")")
else:
raise CoconutInternalException("invalid series match type", series_type)
def match_implicit_tuple(self, tokens, item):
"""Matches an implicit tuple."""
return self.match_sequence(["(", tokens], item)
def match_sequence(self, tokens, item):
"""Matches a sequence."""
internal_assert(2 <= len(tokens) <= 3, "invalid sequence match tokens", tokens)
tail = None
if len(tokens) == 2:
series_type, matches = tokens
else:
series_type, matches, tail = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Sequence)")
if tail is None:
self.add_check("_coconut.len(" + item + ") == " + str(len(matches)))
else:
self.add_check("_coconut.len(" + item + ") >= " + str(len(matches)))
if tail != wildcard:
if len(matches) > 0:
splice = "[" + str(len(matches)) + ":]"
else:
splice = ""
self.assign_to_series(tail, series_type, item + splice)
self.match_all_in(matches, item)
def match_iterator(self, tokens, item):
"""Matches a lazy list or a chain."""
internal_assert(2 <= len(tokens) <= 3, "invalid iterator match tokens", tokens)
tail = None
if len(tokens) == 2:
_, matches = tokens
else:
_, matches, tail = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Iterable)")
if tail is None:
itervar = self.get_temp_var()
self.add_def(itervar + " = _coconut.tuple(" + item + ")")
elif matches:
itervar = self.get_temp_var()
if tail == wildcard:
tail = item
else:
self.add_def(tail + " = _coconut.iter(" + item + ")")
self.add_def(itervar + " = _coconut.tuple(_coconut_igetitem(" + tail + ", _coconut.slice(None, " + str(len(matches)) + ")))")
else:
itervar = None
if tail != wildcard:
self.add_def(tail + " = " + item)
if itervar is not None:
with self.down_a_level():
self.add_check("_coconut.len(" + itervar + ") == " + str(len(matches)))
self.match_all_in(matches, itervar)
def match_star(self, tokens, item):
"""Matches starred assignment."""
internal_assert(1 <= len(tokens) <= 3, "invalid star match tokens", tokens)
head_matches, last_matches = None, None
if len(tokens) == 1:
middle = tokens[0]
elif len(tokens) == 2:
if isinstance(tokens[0], str):
middle, last_matches = tokens
else:
head_matches, middle = tokens
else:
head_matches, middle, last_matches = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Iterable)")
if head_matches is None and last_matches is None:
if middle != wildcard:
self.add_def(middle + " = _coconut.list(" + item + ")")
else:
itervar = self.get_temp_var()
self.add_def(itervar + " = _coconut.list(" + item + ")")
with self.down_a_level():
req_length = (len(head_matches) if head_matches is not None else 0) + (len(last_matches) if last_matches is not None else 0)
self.add_check("_coconut.len(" + itervar + ") >= " + str(req_length))
if middle != wildcard:
head_splice = str(len(head_matches)) if head_matches is not None else ""
last_splice = "-" + str(len(last_matches)) if last_matches is not None else ""
self.add_def(middle + " = " + itervar + "[" + head_splice + ":" + last_splice + "]")
if head_matches is not None:
self.match_all_in(head_matches, itervar)
if last_matches is not None:
for x in range(1, len(last_matches) + 1):
self.match(last_matches[-x], itervar + "[-" + str(x) + "]")
def match_rsequence(self, tokens, item):
"""Matches a reverse sequence."""
front, series_type, matches = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Sequence)")
self.add_check("_coconut.len(" + item + ") >= " + str(len(matches)))
if front != wildcard:
if len(matches):
splice = "[:" + str(-len(matches)) + "]"
else:
splice = ""
self.assign_to_series(front, series_type, item + splice)
for i, match in enumerate(matches):
self.match(match, item + "[" + str(i - len(matches)) + "]")
def match_msequence(self, tokens, item):
"""Matches a middle sequence."""
series_type, head_matches, middle, _, last_matches = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Sequence)")
self.add_check("_coconut.len(" + item + ") >= " + str(len(head_matches) + len(last_matches)))
if middle != wildcard:
if len(head_matches) and len(last_matches):
splice = "[" + str(len(head_matches)) + ":" + str(-len(last_matches)) + "]"
elif len(head_matches):
splice = "[" + str(len(head_matches)) + ":]"
elif len(last_matches):
splice = "[:" + str(-len(last_matches)) + "]"
else:
splice = ""
self.assign_to_series(middle, series_type, item + splice)
self.match_all_in(head_matches, item)
for i, match in enumerate(last_matches):
self.match(match, item + "[" + str(i - len(last_matches)) + "]")
def match_string(self, tokens, item):
"""Match prefix string."""
prefix, name = tokens
return self.match_mstring((prefix, name, None), item)
def match_rstring(self, tokens, item):
"""Match suffix string."""
name, suffix = tokens
return self.match_mstring((None, name, suffix), item)
def match_mstring(self, tokens, item):
"""Match prefix and suffix string."""
prefix, name, suffix = tokens
if prefix is None:
use_bytes = suffix.startswith("b")
elif suffix is None:
use_bytes = prefix.startswith("b")
elif prefix.startswith("b") and suffix.startswith("b"):
use_bytes = True
elif prefix.startswith("b") or suffix.startswith("b"):
raise CoconutDeferredSyntaxError("string literals and byte literals cannot be added in patterns", self.loc)
else:
use_bytes = False
if use_bytes:
self.add_check("_coconut.isinstance(" + item + ", _coconut.bytes)")
else:
self.add_check("_coconut.isinstance(" + item + ", _coconut.str)")
if prefix is not None:
self.add_check(item + ".startswith(" + prefix + ")")
if suffix is not None:
self.add_check(item + ".endswith(" + suffix + ")")
if name != wildcard:
self.add_def(
name + " = " + item + "["
+ ("" if prefix is None else self.comp.eval_now("len(" + prefix + ")")) + ":"
+ ("" if suffix is None else self.comp.eval_now("-len(" + suffix + ")")) + "]",
)
def match_const(self, tokens, item):
"""Matches a constant."""
match, = tokens
if match in const_vars:
self.add_check(item + " is " + match)
else:
self.add_check(item + " == " + match)
def match_set(self, tokens, item):
"""Matches a set."""
match, = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Set)")
self.add_check("_coconut.len(" + item + ") == " + str(len(match)))
for const in match:
self.add_check(const + " in " + item)
def split_data_or_class_match(self, tokens):
"""Split data/class match tokens into cls_name, pos_matches, name_matches, star_match."""
cls_name, matches = tokens
pos_matches = []
name_matches = {}
star_match = None
for match_arg in matches:
if len(match_arg) == 1:
match, = match_arg
if star_match is not None:
raise CoconutDeferredSyntaxError("positional arg after starred arg in data/class match", self.loc)
if name_matches:
raise CoconutDeferredSyntaxError("positional arg after named arg in data/class match", self.loc)
pos_matches.append(match)
elif len(match_arg) == 2:
internal_assert(match_arg[0] == "*", "invalid starred data/class match arg tokens", match_arg)
_, match = match_arg
if star_match is not None:
raise CoconutDeferredSyntaxError("duplicate starred arg in data/class match", self.loc)
if name_matches:
raise CoconutDeferredSyntaxError("both starred arg and named arg in data/class match", self.loc)
star_match = match
elif len(match_arg) == 3:
internal_assert(match_arg[1] == "=", "invalid named data/class match arg tokens", match_arg)
name, _, match = match_arg
if star_match is not None:
raise CoconutDeferredSyntaxError("both named arg and starred arg in data/class match", self.loc)
if name in name_matches:
raise CoconutDeferredSyntaxError("duplicate named arg {name!r} in data/class match".format(name=name), self.loc)
name_matches[name] = match
else:
raise CoconutInternalException("invalid data/class match arg", match_arg)
return cls_name, pos_matches, name_matches, star_match
def match_class(self, tokens, item):
"""Matches a class PEP-622-style."""
cls_name, pos_matches, name_matches, star_match = self.split_data_or_class_match(tokens)
self.add_check("_coconut.isinstance(" + item + ", " + cls_name + ")")
self_match_matcher, other_cls_matcher = self.branches(2)
# handle instances of _coconut_self_match_types
self_match_matcher.add_check("_coconut.isinstance(" + item + ", _coconut_self_match_types)")
if pos_matches:
if len(pos_matches) > 1:
self_match_matcher.add_def('raise _coconut.TypeError("too many positional args in class match (got ' + str(len(pos_matches)) + '; type supports 1)")')
else:
self_match_matcher.match(pos_matches[0], item)
# handle all other classes
other_cls_matcher.add_check("not _coconut.isinstance(" + item + ", _coconut_self_match_types)")
for i, match in enumerate(pos_matches):
other_cls_matcher.match(match, "_coconut.getattr(" + item + ", " + item + ".__match_args__[" + str(i) + "])")
# handle starred arg
if star_match is not None:
temp_var = self.get_temp_var()
self.add_def(
"{temp_var} = _coconut.tuple(_coconut.getattr({item}, {item}.__match_args__[i]) for i in _coconut.range({min_ind}, _coconut.len({item}.__match_args__)))".format(
temp_var=temp_var,
item=item,
min_ind=len(pos_matches),
),
)
with self.down_a_level():
self.match(star_match, temp_var)
# handle keyword args
for name, match in name_matches.items():
self.match(match, item + "." + name)
def match_data(self, tokens, item):
"""Matches a data type."""
cls_name, pos_matches, name_matches, star_match = self.split_data_or_class_match(tokens)
self.add_check("_coconut.isinstance(" + item + ", " + cls_name + ")")
if star_match is None:
self.add_check(
'_coconut.len({item}) == {total_len}'.format(
item=item,
total_len=len(pos_matches) + len(name_matches),
),
)
# avoid checking >= 0
elif len(pos_matches):
self.add_check(
"_coconut.len({item}) >= {min_len}".format(
item=item,
min_len=len(pos_matches),
),
)
self.match_all_in(pos_matches, item)
if star_match is not None:
self.match(star_match, item + "[" + str(len(pos_matches)) + ":]")
for name, match in name_matches.items():
self.match(match, item + "." + name)
def match_data_or_class(self, tokens, item):
"""Matches an ambiguous data or class match."""
self.rule_conflict_warn(
"ambiguous pattern; could be class match or data match",
if_coconut='resolving to Coconut data match by default',
if_python='resolving to Python-style class match due to Python-style "match: case" block',
extra="use explicit 'data data_name(patterns)' or 'class cls_name(patterns)' syntax to dismiss",
)
if self.using_python_rules:
return self.match_class(tokens, item)
else:
return self.match_data(tokens, item)
def match_paren(self, tokens, item):
"""Matches a paren."""
match, = tokens
return self.match(match, item)
def match_trailer(self, tokens, item):
"""Matches typedefs and as patterns."""
internal_assert(len(tokens) > 1 and len(tokens) % 2 == 1, "invalid trailer match tokens", tokens)
match, trailers = tokens[0], tokens[1:]
for i in range(0, len(trailers), 2):
op, arg = trailers[i], trailers[i + 1]
if op == "as":
self.match_var([arg], item, bind_wildcard=True)
elif op == "is":
self.add_check("_coconut.isinstance(" + item + ", " + arg + ")")
else:
raise CoconutInternalException("invalid trailer match operation", op)
self.match(match, item)
def match_and(self, tokens, item):
"""Matches and."""
for match in tokens:
self.match(match, item)
def match_or(self, tokens, item):
"""Matches or."""
new_matchers = self.branches(len(tokens))
for m, tok in zip(new_matchers, tokens):
m.match(tok, item)
def match_view(self, tokens, item):
"""Matches view patterns"""
view_func, view_pattern = tokens
func_result_var = self.get_temp_var()
self.add_def(
handle_indentation(
"""
try:
{func_result_var} = ({view_func})({item})
except _coconut.Exception as _coconut_view_func_exc:
if _coconut.getattr(_coconut_view_func_exc.__class__, "__name__", None) == "MatchError":
{func_result_var} = _coconut_sentinel
else:
raise
""",
).format(
func_result_var=func_result_var,
view_func=view_func,
item=item,
),
)
with self.down_a_level():
self.add_check(func_result_var + " is not _coconut_sentinel")
self.match(view_pattern, func_result_var)
def match_infix(self, tokens, item):
"""Matches infix patterns."""
internal_assert(len(tokens) > 1 and len(tokens) % 2 == 1, "invalid infix match tokens", tokens)
match = tokens[0]
for i in range(1, len(tokens), 2):
op, arg = tokens[i], tokens[i + 1]
self.add_check("(" + op + ")(" + item + ", " + arg + ")")
self.match(match, item)
def match(self, tokens, item):
"""Performs pattern-matching processing."""
for flag, get_handler in self.matchers.items():
if flag in tokens:
return get_handler(self)(tokens, item)
raise CoconutInternalException("invalid pattern-matching tokens", tokens)
def out(self):
"""Return pattern-matching code assuming check_var starts False."""
out = []
# set match_set_name_vars to sentinels
for name in self.names:
out.append(self.get_set_name_var(name) + " = _coconut_sentinel\n")
# match checkdefs setting check_var
closes = 0
for checks, defs in self.checkdefs:
if checks:
out.append("if " + paren_join(checks, "and") + ":\n" + openindent)
closes += 1
if defs:
out.append("\n".join(defs) + "\n")
out.append(self.check_var + " = True\n" + closeindent * closes)
# handle children
for children in self.child_groups:
out.append(
handle_indentation(
"""
if {check_var}:
{check_var} = False
{children}
""",
add_newline=True,
).format(
check_var=self.check_var,
children="".join(child.out() for child in children),
),
)
# commit variable definitions
name_set_code = []
for name, val in self.names.items():
name_set_code.append(
handle_indentation(
"""
if {set_name_var} is not _coconut_sentinel:
{name} = {val}
""",
add_newline=True,
).format(
set_name_var=self.get_set_name_var(name),
name=name,
val=val,
),
)
if name_set_code:
out.append(
handle_indentation(
"""
if {check_var}:
{name_set_code}
""",
).format(
check_var=self.check_var,
name_set_code="".join(name_set_code),
),
)
# handle guards
if self.guards:
out.append(
handle_indentation(
"""
if {check_var} and not ({guards}):
{check_var} = False
""",
add_newline=True,
).format(
check_var=self.check_var,
guards=paren_join(self.guards, "and"),
),
)
return "".join(out)
def build(self, stmts=None, set_check_var=True, invert=False):
"""Construct code for performing the match then executing stmts."""
out = []
if set_check_var:
out.append(self.check_var + " = False\n")
out.append(self.out())
if stmts is not None:
out.append("if " + ("not " if invert else "") + self.check_var + ":" + "\n" + openindent + "".join(stmts) + closeindent)
return "".join(out)
|
parsley/tests/models.py | blueyed/Django-parsley | 208 | 11130352 | # Django requires every app to have models.
from django.db import models
class Student(models.Model):
name = models.CharField(max_length=100)
|
pix2pix/src/model/models.py | lisabecker/DeepLearningImplementations | 2,010 | 11130355 | from keras.models import Model
from keras.layers.core import Flatten, Dense, Dropout, Activation, Lambda, Reshape
from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D
from keras.layers import Input, Concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
import keras.backend as K
import numpy as np
def minb_disc(x):
diffs = K.expand_dims(x, 3) - K.expand_dims(K.permute_dimensions(x, [1, 2, 0]), 0)
abs_diffs = K.sum(K.abs(diffs), 2)
x = K.sum(K.exp(-abs_diffs), 2)
return x
def lambda_output(input_shape):
return input_shape[:2]
# def conv_block_unet(x, f, name, bn_mode, bn_axis, bn=True, dropout=False, strides=(2,2)):
# x = Conv2D(f, (3, 3), strides=strides, name=name, padding="same")(x)
# if bn:
# x = BatchNormalization(axis=bn_axis)(x)
# x = LeakyReLU(0.2)(x)
# if dropout:
# x = Dropout(0.5)(x)
# return x
# def up_conv_block_unet(x1, x2, f, name, bn_mode, bn_axis, bn=True, dropout=False):
# x1 = UpSampling2D(size=(2, 2))(x1)
# x = merge([x1, x2], mode="concat", concat_axis=bn_axis)
# x = Conv2D(f, (3, 3), name=name, padding="same")(x)
# if bn:
# x = BatchNormalization(axis=bn_axis)(x)
# x = Activation("relu")(x)
# if dropout:
# x = Dropout(0.5)(x)
# return x
def conv_block_unet(x, f, name, bn_mode, bn_axis, bn=True, strides=(2,2)):
x = LeakyReLU(0.2)(x)
x = Conv2D(f, (3, 3), strides=strides, name=name, padding="same")(x)
if bn:
x = BatchNormalization(axis=bn_axis)(x)
return x
def up_conv_block_unet(x, x2, f, name, bn_mode, bn_axis, bn=True, dropout=False):
x = Activation("relu")(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(f, (3, 3), name=name, padding="same")(x)
if bn:
x = BatchNormalization(axis=bn_axis)(x)
if dropout:
x = Dropout(0.5)(x)
x = Concatenate(axis=bn_axis)([x, x2])
return x
def deconv_block_unet(x, x2, f, h, w, batch_size, name, bn_mode, bn_axis, bn=True, dropout=False):
o_shape = (batch_size, h * 2, w * 2, f)
x = Activation("relu")(x)
x = Deconv2D(f, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
if bn:
x = BatchNormalization(axis=bn_axis)(x)
if dropout:
x = Dropout(0.5)(x)
x = Concatenate(axis=bn_axis)([x, x2])
return x
def generator_unet_upsampling(img_dim, bn_mode, model_name="generator_unet_upsampling"):
nb_filters = 64
    if K.image_data_format() == "channels_first":
bn_axis = 1
nb_channels = img_dim[0]
min_s = min(img_dim[1:])
else:
bn_axis = -1
nb_channels = img_dim[-1]
min_s = min(img_dim[:-1])
unet_input = Input(shape=img_dim, name="unet_input")
# Prepare encoder filters
nb_conv = int(np.floor(np.log(min_s) / np.log(2)))
list_nb_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]
# Encoder
list_encoder = [Conv2D(list_nb_filters[0], (3, 3),
strides=(2, 2), name="unet_conv2D_1", padding="same")(unet_input)]
for i, f in enumerate(list_nb_filters[1:]):
name = "unet_conv2D_%s" % (i + 2)
conv = conv_block_unet(list_encoder[-1], f, name, bn_mode, bn_axis)
list_encoder.append(conv)
# Prepare decoder filters
list_nb_filters = list_nb_filters[:-2][::-1]
if len(list_nb_filters) < nb_conv - 1:
list_nb_filters.append(nb_filters)
# Decoder
list_decoder = [up_conv_block_unet(list_encoder[-1], list_encoder[-2],
list_nb_filters[0], "unet_upconv2D_1", bn_mode, bn_axis, dropout=True)]
for i, f in enumerate(list_nb_filters[1:]):
name = "unet_upconv2D_%s" % (i + 2)
# Dropout only on first few layers
if i < 2:
d = True
else:
d = False
conv = up_conv_block_unet(list_decoder[-1], list_encoder[-(i + 3)], f, name, bn_mode, bn_axis, dropout=d)
list_decoder.append(conv)
x = Activation("relu")(list_decoder[-1])
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(nb_channels, (3, 3), name="last_conv", padding="same")(x)
x = Activation("tanh")(x)
generator_unet = Model(inputs=[unet_input], outputs=[x])
return generator_unet
def generator_unet_deconv(img_dim, bn_mode, batch_size, model_name="generator_unet_deconv"):
assert K.backend() == "tensorflow", "Not implemented with theano backend"
nb_filters = 64
bn_axis = -1
h, w, nb_channels = img_dim
min_s = min(img_dim[:-1])
unet_input = Input(shape=img_dim, name="unet_input")
# Prepare encoder filters
nb_conv = int(np.floor(np.log(min_s) / np.log(2)))
list_nb_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]
# Encoder
list_encoder = [Conv2D(list_nb_filters[0], (3, 3),
strides=(2, 2), name="unet_conv2D_1", padding="same")(unet_input)]
# update current "image" h and w
    h, w = h // 2, w // 2  # integer division keeps the Deconv2D output shapes integral
for i, f in enumerate(list_nb_filters[1:]):
name = "unet_conv2D_%s" % (i + 2)
conv = conv_block_unet(list_encoder[-1], f, name, bn_mode, bn_axis)
list_encoder.append(conv)
        h, w = h // 2, w // 2
# Prepare decoder filters
list_nb_filters = list_nb_filters[:-1][::-1]
if len(list_nb_filters) < nb_conv - 1:
list_nb_filters.append(nb_filters)
# Decoder
list_decoder = [deconv_block_unet(list_encoder[-1], list_encoder[-2],
list_nb_filters[0], h, w, batch_size,
"unet_upconv2D_1", bn_mode, bn_axis, dropout=True)]
h, w = h * 2, w * 2
for i, f in enumerate(list_nb_filters[1:]):
name = "unet_upconv2D_%s" % (i + 2)
# Dropout only on first few layers
if i < 2:
d = True
else:
d = False
conv = deconv_block_unet(list_decoder[-1], list_encoder[-(i + 3)], f, h,
w, batch_size, name, bn_mode, bn_axis, dropout=d)
list_decoder.append(conv)
h, w = h * 2, w * 2
x = Activation("relu")(list_decoder[-1])
o_shape = (batch_size,) + img_dim
x = Deconv2D(nb_channels, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
x = Activation("tanh")(x)
generator_unet = Model(inputs=[unet_input], outputs=[x])
return generator_unet
def DCGAN_discriminator(img_dim, nb_patch, bn_mode, model_name="DCGAN_discriminator", use_mbd=True):
"""
Discriminator model of the DCGAN
args : img_dim (tuple of int) num_chan, height, width
pretr_weights_file (str) file holding pre trained weights
returns : model (keras NN) the Neural Net model
"""
list_input = [Input(shape=img_dim, name="disc_input_%s" % i) for i in range(nb_patch)]
    if K.image_data_format() == "channels_first":
bn_axis = 1
else:
bn_axis = -1
nb_filters = 64
nb_conv = int(np.floor(np.log(img_dim[1]) / np.log(2)))
list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]
# First conv
x_input = Input(shape=img_dim, name="discriminator_input")
x = Conv2D(list_filters[0], (3, 3), strides=(2, 2), name="disc_conv2d_1", padding="same")(x_input)
x = BatchNormalization(axis=bn_axis)(x)
x = LeakyReLU(0.2)(x)
# Next convs
for i, f in enumerate(list_filters[1:]):
name = "disc_conv2d_%s" % (i + 2)
x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
x = BatchNormalization(axis=bn_axis)(x)
x = LeakyReLU(0.2)(x)
x_flat = Flatten()(x)
x = Dense(2, activation="softmax", name="disc_dense")(x_flat)
PatchGAN = Model(inputs=[x_input], outputs=[x, x_flat], name="PatchGAN")
print("PatchGAN summary")
PatchGAN.summary()
x = [PatchGAN(patch)[0] for patch in list_input]
x_mbd = [PatchGAN(patch)[1] for patch in list_input]
if len(x) > 1:
x = Concatenate(axis=bn_axis)(x)
else:
x = x[0]
if use_mbd:
if len(x_mbd) > 1:
x_mbd = Concatenate(axis=bn_axis)(x_mbd)
else:
x_mbd = x_mbd[0]
num_kernels = 100
dim_per_kernel = 5
M = Dense(num_kernels * dim_per_kernel, use_bias=False, activation=None)
MBD = Lambda(minb_disc, output_shape=lambda_output)
x_mbd = M(x_mbd)
x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd)
x_mbd = MBD(x_mbd)
x = Concatenate(axis=bn_axis)([x, x_mbd])
x_out = Dense(2, activation="softmax", name="disc_output")(x)
discriminator_model = Model(inputs=list_input, outputs=[x_out], name=model_name)
return discriminator_model
def DCGAN(generator, discriminator_model, img_dim, patch_size, image_dim_ordering):
gen_input = Input(shape=img_dim, name="DCGAN_input")
generated_image = generator(gen_input)
if image_dim_ordering == "channels_first":
h, w = img_dim[1:]
else:
h, w = img_dim[:-1]
ph, pw = patch_size
list_row_idx = [(i * ph, (i + 1) * ph) for i in range(h // ph)]
list_col_idx = [(i * pw, (i + 1) * pw) for i in range(w // pw)]
list_gen_patch = []
for row_idx in list_row_idx:
for col_idx in list_col_idx:
if image_dim_ordering == "channels_last":
x_patch = Lambda(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])(generated_image)
else:
x_patch = Lambda(lambda z: z[:, :, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1]])(generated_image)
list_gen_patch.append(x_patch)
DCGAN_output = discriminator_model(list_gen_patch)
DCGAN = Model(inputs=[gen_input],
outputs=[generated_image, DCGAN_output],
name="DCGAN")
return DCGAN
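# Illustrative sketch (not part of the original file): wiring the builders above into
# a trainable pix2pix-style GAN. The image size, patch size, optimizer settings and
# loss weights below are assumptions chosen for demonstration, not values from the repo.
def example_build_full_gan():
    from keras.optimizers import Adam
    img_dim = (256, 256, 3)                                    # full image, channels_last
    patch_size = (64, 64)                                      # discriminator sees 4 x 4 = 16 patches
    nb_patch = (img_dim[0] // patch_size[0]) * (img_dim[1] // patch_size[1])
    disc_img_dim = (patch_size[0], patch_size[1], img_dim[-1])
    generator = generator_unet_upsampling(img_dim, bn_mode=2)
    discriminator = DCGAN_discriminator(disc_img_dim, nb_patch, bn_mode=2, use_mbd=False)
    discriminator.trainable = False                            # frozen while training the generator
    full_gan = DCGAN(generator, discriminator, img_dim, patch_size, "channels_last")
    # L1 loss on the generated image, cross-entropy on the patch discriminator output
    full_gan.compile(loss=["mae", "binary_crossentropy"], loss_weights=[10, 1],
                     optimizer=Adam(lr=1e-4))
    return generator, discriminator, full_gan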
def load(model_name, img_dim, nb_patch, bn_mode, use_mbd, batch_size, do_plot):
if model_name == "generator_unet_upsampling":
model = generator_unet_upsampling(img_dim, bn_mode, model_name=model_name)
model.summary()
if do_plot:
from keras.utils import plot_model
plot_model(model, to_file="../../figures/%s.png" % model_name, show_shapes=True, show_layer_names=True)
return model
if model_name == "generator_unet_deconv":
model = generator_unet_deconv(img_dim, bn_mode, batch_size, model_name=model_name)
model.summary()
if do_plot:
from keras.utils import plot_model
plot_model(model, to_file="../../figures/%s.png" % model_name, show_shapes=True, show_layer_names=True)
return model
if model_name == "DCGAN_discriminator":
model = DCGAN_discriminator(img_dim, nb_patch, bn_mode, model_name=model_name, use_mbd=use_mbd)
model.summary()
if do_plot:
from keras.utils import plot_model
plot_model(model, to_file="../../figures/%s.png" % model_name, show_shapes=True, show_layer_names=True)
return model
if __name__ == "__main__":
# load("generator_unet_deconv", (256, 256, 3), 16, 2, False, 32)
load("generator_unet_upsampling", (256, 256, 3), 16, 2, False, 32)
|
venv/Lib/site-packages/mpl_toolkits/axisartist/grid_finder.py | EkremBayar/bayar | 603 | 11130360 | import numpy as np
from matplotlib import _api, ticker as mticker
from matplotlib.transforms import Bbox, Transform
from .clip_path import clip_line_to_rect
class ExtremeFinderSimple:
"""
A helper class to figure out the range of grid lines that need to be drawn.
"""
def __init__(self, nx, ny):
"""
Parameters
----------
nx, ny : int
The number of samples in each direction.
"""
self.nx = nx
self.ny = ny
def __call__(self, transform_xy, x1, y1, x2, y2):
"""
Compute an approximation of the bounding box obtained by applying
*transform_xy* to the box delimited by ``(x1, y1, x2, y2)``.
The intended use is to have ``(x1, y1, x2, y2)`` in axes coordinates,
and have *transform_xy* be the transform from axes coordinates to data
coordinates; this method then returns the range of data coordinates
that span the actual axes.
The computation is done by sampling ``nx * ny`` equispaced points in
the ``(x1, y1, x2, y2)`` box and finding the resulting points with
extremal coordinates; then adding some padding to take into account the
finite sampling.
As each sampling step covers a relative range of *1/nx* or *1/ny*,
the padding is computed by expanding the span covered by the extremal
coordinates by these fractions.
"""
x, y = np.meshgrid(
np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny))
xt, yt = transform_xy(np.ravel(x), np.ravel(y))
return self._add_pad(xt.min(), xt.max(), yt.min(), yt.max())
def _add_pad(self, x_min, x_max, y_min, y_max):
"""Perform the padding mentioned in `__call__`."""
dx = (x_max - x_min) / self.nx
dy = (y_max - y_min) / self.ny
return x_min - dx, x_max + dx, y_min - dy, y_max + dy
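# Illustrative sketch (not part of the original module): estimating the data-coordinate
# extremes of the unit axes square under a simple polar-style transform. The transform
# and the 20 x 20 sampling grid are assumptions chosen for the example.
def _example_extreme_finder():
    def axes_to_polar(x, y):
        return np.degrees(np.arctan2(y, x)), np.hypot(x, y)
    finder = ExtremeFinderSimple(20, 20)
    # Returns (x_min, x_max, y_min, y_max) in the transformed (data) coordinates,
    # padded by one sampling step in each direction.
    return finder(axes_to_polar, 0.0, 0.0, 1.0, 1.0)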
class GridFinder:
def __init__(self,
transform,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
        transform : transform from the image (axes) coordinate, which will be
            the transData of the axes, to the world coordinate;
            or transform = (transform_xy, inv_transform_xy)
locator1, locator2 : grid locator for 1st and 2nd axis.
"""
if extreme_finder is None:
extreme_finder = ExtremeFinderSimple(20, 20)
if grid_locator1 is None:
grid_locator1 = MaxNLocator()
if grid_locator2 is None:
grid_locator2 = MaxNLocator()
if tick_formatter1 is None:
tick_formatter1 = FormatterPrettyPrint()
if tick_formatter2 is None:
tick_formatter2 = FormatterPrettyPrint()
self.extreme_finder = extreme_finder
self.grid_locator1 = grid_locator1
self.grid_locator2 = grid_locator2
self.tick_formatter1 = tick_formatter1
self.tick_formatter2 = tick_formatter2
self.update_transform(transform)
def get_grid_info(self, x1, y1, x2, y2):
"""
        Compute and return the grid information (extremes, grid lines, tick
        levels and tick labels) for the axes rectangle ``(x1, y1, x2, y2)``.
"""
extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
        # min & max range of lat (or lon) over which each grid line will be drawn,
        # i.e., the gridline of lon=0 will be drawn from lat_min to lat_max.
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = self.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = self.grid_locator2(lat_min, lat_max)
lon_values = lon_levs[:lon_n] / lon_factor
lat_values = lat_levs[:lat_n] / lat_factor
lon_lines, lat_lines = self._get_raw_grid_lines(lon_values,
lat_values,
lon_min, lon_max,
lat_min, lat_max)
ddx = (x2-x1)*1.e-10
ddy = (y2-y1)*1.e-10
bb = Bbox.from_extents(x1-ddx, y1-ddy, x2+ddx, y2+ddy)
grid_info = {
"extremes": extremes,
"lon_lines": lon_lines,
"lat_lines": lat_lines,
"lon": self._clip_grid_lines_and_find_ticks(
lon_lines, lon_values, lon_levs, bb),
"lat": self._clip_grid_lines_and_find_ticks(
lat_lines, lat_values, lat_levs, bb),
}
tck_labels = grid_info["lon"]["tick_labels"] = {}
for direction in ["left", "bottom", "right", "top"]:
levs = grid_info["lon"]["tick_levels"][direction]
tck_labels[direction] = self.tick_formatter1(
direction, lon_factor, levs)
tck_labels = grid_info["lat"]["tick_labels"] = {}
for direction in ["left", "bottom", "right", "top"]:
levs = grid_info["lat"]["tick_levels"][direction]
tck_labels[direction] = self.tick_formatter2(
direction, lat_factor, levs)
return grid_info
def _get_raw_grid_lines(self,
lon_values, lat_values,
lon_min, lon_max, lat_min, lat_max):
lons_i = np.linspace(lon_min, lon_max, 100) # for interpolation
lats_i = np.linspace(lat_min, lat_max, 100)
lon_lines = [self.transform_xy(np.full_like(lats_i, lon), lats_i)
for lon in lon_values]
lat_lines = [self.transform_xy(lons_i, np.full_like(lons_i, lat))
for lat in lat_values]
return lon_lines, lat_lines
def _clip_grid_lines_and_find_ticks(self, lines, values, levs, bb):
gi = {
"values": [],
"levels": [],
"tick_levels": dict(left=[], bottom=[], right=[], top=[]),
"tick_locs": dict(left=[], bottom=[], right=[], top=[]),
"lines": [],
}
tck_levels = gi["tick_levels"]
tck_locs = gi["tick_locs"]
for (lx, ly), v, lev in zip(lines, values, levs):
xy, tcks = clip_line_to_rect(lx, ly, bb)
if not xy:
continue
gi["levels"].append(v)
gi["lines"].append(xy)
for tck, direction in zip(tcks,
["left", "bottom", "right", "top"]):
for t in tck:
tck_levels[direction].append(lev)
tck_locs[direction].append(t)
return gi
def update_transform(self, aux_trans):
if not isinstance(aux_trans, Transform) and len(aux_trans) != 2:
raise TypeError("'aux_trans' must be either a Transform instance "
"or a pair of callables")
self._aux_transform = aux_trans
def transform_xy(self, x, y):
aux_trf = self._aux_transform
if isinstance(aux_trf, Transform):
return aux_trf.transform(np.column_stack([x, y])).T
else:
transform_xy, inv_transform_xy = aux_trf
return transform_xy(x, y)
def inv_transform_xy(self, x, y):
aux_trf = self._aux_transform
if isinstance(aux_trf, Transform):
return aux_trf.inverted().transform(np.column_stack([x, y])).T
else:
transform_xy, inv_transform_xy = aux_trf
return inv_transform_xy(x, y)
def update(self, **kw):
for k in kw:
if k in ["extreme_finder",
"grid_locator1",
"grid_locator2",
"tick_formatter1",
"tick_formatter2"]:
setattr(self, k, kw[k])
else:
raise ValueError("Unknown update property '%s'" % k)
class MaxNLocator(mticker.MaxNLocator):
def __init__(self, nbins=10, steps=None,
trim=True,
integer=False,
symmetric=False,
prune=None):
# trim argument has no effect. It has been left for API compatibility
super().__init__(nbins, steps=steps, integer=integer,
symmetric=symmetric, prune=prune)
self.create_dummy_axis()
self._factor = 1
def __call__(self, v1, v2):
self.set_bounds(v1 * self._factor, v2 * self._factor)
locs = super().__call__()
return np.array(locs), len(locs), self._factor
@_api.deprecated("3.3")
def set_factor(self, f):
self._factor = f
class FixedLocator:
def __init__(self, locs):
self._locs = locs
self._factor = 1
def __call__(self, v1, v2):
v1, v2 = sorted([v1 * self._factor, v2 * self._factor])
locs = np.array([l for l in self._locs if v1 <= l <= v2])
return locs, len(locs), self._factor
@_api.deprecated("3.3")
def set_factor(self, f):
self._factor = f
# Tick Formatter
class FormatterPrettyPrint:
def __init__(self, useMathText=True):
self._fmt = mticker.ScalarFormatter(
useMathText=useMathText, useOffset=False)
self._fmt.create_dummy_axis()
def __call__(self, direction, factor, values):
return self._fmt.format_ticks(values)
class DictFormatter:
def __init__(self, format_dict, formatter=None):
"""
format_dict : dictionary for format strings to be used.
formatter : fall-back formatter
"""
super().__init__()
self._format_dict = format_dict
self._fallback_formatter = formatter
def __call__(self, direction, factor, values):
"""
factor is ignored if value is found in the dictionary
"""
if self._fallback_formatter:
fallback_strings = self._fallback_formatter(
direction, factor, values)
else:
fallback_strings = [""] * len(values)
return [self._format_dict.get(k, v)
for k, v in zip(values, fallback_strings)]
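# Illustrative sketch (not part of the original module): a DictFormatter that labels a
# few selected tick values explicitly and falls back to pretty-printing for the rest.
# The tick values below are assumptions chosen for the example.
def _example_dict_formatter():
    fallback = FormatterPrettyPrint()
    fmt = DictFormatter({0.0: "0", 90.0: r"$90^\circ$"}, formatter=fallback)
    # Direction and factor are ignored for values found in the dictionary.
    return fmt("left", 1, [0.0, 45.0, 90.0])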
|
leetcode.com/python/150_Evaluate_Reverse_Polish_Notation.py | vansh-tiwari/coding-interview-gym | 713 | 11130370 | # https://tinyurl.com/r35ffgt
from collections import deque
class Solution(object):
def evalRPN(self, tokens):
"""
:type tokens: List[str]
:rtype: int
"""
tokensDeque = deque(tokens)
stack = []
opes = ["+", "-", "*", "/"]
while tokensDeque:
currentToken = tokensDeque.popleft()
if currentToken in opes:
firstNum = int(stack.pop())
secondNum = int(stack.pop())
result = 0
if currentToken == "+":
result = secondNum + firstNum
elif currentToken == "-":
result = secondNum - firstNum
elif currentToken == "*":
result = secondNum * firstNum
                else:
                    # Emulate division that truncates toward zero: Python's // floors,
                    # so add 1 when the signs differ and the division isn't exact.
                    if firstNum * secondNum < 0 and secondNum % firstNum != 0:
                        result = secondNum // firstNum + 1
                    else:
                        result = secondNum // firstNum
stack.append(str(result))
else:
stack.append(currentToken)
        return int(stack[-1])
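# Added usage check (not part of the original solution), using two classic examples:
# ["2", "1", "+", "3", "*"] -> (2 + 1) * 3 = 9 and ["4", "13", "5", "/", "+"] -> 4 + 13/5 = 6
if __name__ == "__main__":
    print(Solution().evalRPN(["2", "1", "+", "3", "*"]))   # 9
    print(Solution().evalRPN(["4", "13", "5", "/", "+"]))  # 6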
|
hpccm/building_blocks/hdf5.py | robertmaynard/hpc-container-maker | 340 | 11130372 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""HDF5 building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
from copy import copy as _copy
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class hdf5(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
"""The `hdf5` building block downloads, configures, builds, and
installs the [HDF5](http://www.hdfgroup.org) component. Depending
on the parameters, the source will be downloaded from the web
(default) or copied from a source directory in the local build
context.
# Parameters
annotate: Boolean flag to specify whether to include annotations
(labels). The default is False.
check: Boolean flag to specify whether the `make check` step
should be performed. The default is False.
configure_opts: List of options to pass to `configure`. The
default values are `--enable-cxx` and `--enable-fortran`.
directory: Path to the unpackaged source directory relative to the
local build context. The default value is empty. If this is
defined, the source in the local build context will be used rather
than downloading the source from the web.
disable_FEATURE: Flags to control disabling features when
configuring. For instance, `disable_foo=True` maps to
`--disable-foo`. Underscores in the parameter name are converted
to dashes.
enable_FEATURE[=ARG]: Flags to control enabling features when
configuring. For instance, `enable_foo=True` maps to
`--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
Underscores in the parameter name are converted to dashes.
environment: Boolean flag to specify whether the environment
(`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH`, `PATH`, and others)
should be modified to include HDF5. The default is True.
ldconfig: Boolean flag to specify whether the HDF5 library
directory should be added dynamic linker cache. If False, then
`LD_LIBRARY_PATH` is modified to include the HDF5 library
directory. The default value is False.
ospackages: List of OS packages to install prior to configuring
and building. For Ubuntu, the default values are `bzip2`, `file`,
`make`, `wget`, and `zlib1g-dev`. For RHEL-based Linux
distributions the default values are `bzip2`, `file`, `make`,
`wget` and `zlib-devel`.
prefix: The top level install location. The default value is
`/usr/local/hdf5`.
toolchain: The toolchain object. This should be used if
non-default compilers or other toolchain options are needed. The
default is empty.
version: The version of HDF5 source to download. This value is
ignored if `directory` is set. The default value is `1.12.0`.
with_PACKAGE[=ARG]: Flags to control optional packages when
configuring. For instance, `with_foo=True` maps to `--with-foo`
and `with_foo='/usr/local/foo'` maps to
`--with-foo=/usr/local/foo`. Underscores in the parameter name
are converted to dashes.
without_PACKAGE: Flags to control optional packages when
configuring. For instance `without_foo=True` maps to
`--without-foo`. Underscores in the parameter name are converted
to dashes.
# Examples
```python
hdf5(prefix='/opt/hdf5/1.10.1', version='1.10.1')
```
```python
hdf5(directory='sources/hdf5-1.10.1')
```
```python
n = nvhpc(eula=True)
hdf5(toolchain=n.toolchain)
```
```python
hdf5(check=True, configure_opts=['--enable-cxx', '--enable-fortran',
'--enable-profiling=yes'])
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(hdf5, self).__init__(**kwargs)
self.__baseurl = kwargs.pop('baseurl', 'http://www.hdfgroup.org/ftp/HDF5/releases')
self.__check = kwargs.pop('check', False)
self.__configure_opts = kwargs.pop('configure_opts',
['--enable-cxx',
'--enable-fortran'])
self.__ospackages = kwargs.pop('ospackages', [])
self.__prefix = kwargs.pop('prefix', '/usr/local/hdf5')
# Create a copy of the toolchain so that it can be modified
# without impacting the original
self.__toolchain = _copy(kwargs.pop('toolchain', toolchain()))
self.__runtime_ospackages = [] # Filled in by __distro()
self.__version = kwargs.pop('version', '1.12.0')
# Set the Linux distribution specific parameters
self.__distro()
# Set the download specific parameters
self.__download()
# Set the environment variables
self.environment_variables['CPATH'] = '{}:$CPATH'.format(
posixpath.join(self.__prefix, 'include'))
self.environment_variables['HDF5_DIR'] = self.__prefix
self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
posixpath.join(self.__prefix, 'lib'))
self.environment_variables['PATH'] = '{}:$PATH'.format(
posixpath.join(self.__prefix, 'bin'))
if not self.ldconfig:
self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
# PIC workaround when using the NVIDIA compilers
if self.__toolchain.FC and re.match('.*nvfortran',
self.__toolchain.FC):
if not self.__toolchain.FCFLAGS:
self.__toolchain.FCFLAGS = '-fpic -DPIC'
# Setup build configuration
self.__bb = generic_autotools(
annotations={'version': self.__version},
base_annotation=self.__class__.__name__,
check=self.__check,
configure_opts=self.__configure_opts,
comment=False,
devel_environment=self.environment_variables,
prefix=self.__prefix,
runtime_environment=self.environment_variables,
toolchain=self.__toolchain,
url=self.__url,
**kwargs)
# Container instructions
self += comment('HDF5 version {}'.format(self.__version))
self += packages(ospackages=self.__ospackages)
self += self.__bb
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
if not self.__ospackages:
self.__ospackages = ['bzip2', 'file', 'make', 'wget',
'zlib1g-dev']
self.__runtime_ospackages = ['zlib1g']
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
if not self.__ospackages:
self.__ospackages = ['bzip2', 'file', 'make', 'wget',
'zlib-devel']
if self.__check:
self.__ospackages.append('diffutils')
self.__runtime_ospackages = ['zlib']
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
def __download(self):
"""Construct the series of shell commands, i.e., fill in
self.__commands"""
# The download URL has the format contains vMAJOR.MINOR in the
# path and the tarball contains MAJOR.MINOR.REVISION, so pull
# apart the full version to get the MAJOR and MINOR components.
match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)', self.__version)
major_minor = '{0}.{1}'.format(match.groupdict()['major'],
match.groupdict()['minor'])
tarball = 'hdf5-{}.tar.bz2'.format(self.__version)
self.__url = '{0}/hdf5-{1}/hdf5-{2}/src/{3}'.format(
self.__baseurl, major_minor, self.__version, tarball)
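        # For example (illustrative, not part of the original code), the default
        # version '1.12.0' yields:
        # http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.12/hdf5-1.12.0/src/hdf5-1.12.0.tar.bz2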
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Examples
```python
h = hdf5(...)
Stage0 += h
Stage1 += h.runtime()
```
"""
self.rt += comment('HDF5')
self.rt += packages(ospackages=self.__runtime_ospackages)
self.rt += self.__bb.runtime(_from=_from)
return str(self.rt)
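# Illustrative sketch (not part of the original module): rendering the container
# instructions produced by this building block outside of a full recipe. The version
# and prefix are assumptions chosen for the example.
def _example_render_hdf5():
    h = hdf5(version='1.10.7', prefix='/opt/hdf5/1.10.7')
    # str(h) yields the development-stage instructions; runtime() yields the
    # instructions for copying the installed files into a later stage.
    return str(h), h.runtime(_from='devel')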
|
tests/test_acoustic_models/test_lstm_for_causal_lm.py | techthiyanes/openspeech | 207 | 11130373 | import unittest
import logging
import torch
from openspeech.criterion import Perplexity, PerplexityLossConfigs
from openspeech.lm.lstm_lm import LSTMForLanguageModel
from openspeech.utils import DUMMY_TARGETS, build_dummy_configs
from openspeech.tokenizers.ksponspeech.character import KsponSpeechCharacterTokenizer
logger = logging.getLogger(__name__)
class TestLSTMForLanguageModel(unittest.TestCase):
def test_lstm_forward(self):
configs = build_dummy_configs(
criterion_configs=PerplexityLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = LSTMForLanguageModel(
num_classes=4,
max_length=32,
hidden_state_dim=64,
pad_id=0,
sos_id=1,
eos_id=2,
rnn_type='lstm',
)
criterion = Perplexity(configs, vocab)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-04)
for i in range(3):
logits = model(DUMMY_TARGETS, teacher_forcing_ratio=1.0)
loss = criterion(logits, DUMMY_TARGETS[:, 1:])
loss.backward()
optimizer.step()
assert type(loss.item()) == float
for i in range(3):
logits = model(DUMMY_TARGETS, teacher_forcing_ratio=1.0)
loss = criterion(logits, DUMMY_TARGETS[:, 1:])
loss.backward()
optimizer.step()
assert type(loss.item()) == float
for i in range(3):
logits = model(DUMMY_TARGETS, teacher_forcing_ratio=0.0)
loss = criterion(logits, DUMMY_TARGETS[:, 1:])
loss.backward()
optimizer.step()
assert type(loss.item()) == float
def test_gru_forward(self):
configs = build_dummy_configs(
criterion_configs=PerplexityLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = LSTMForLanguageModel(
num_classes=4,
max_length=32,
hidden_state_dim=64,
pad_id=0,
sos_id=1,
eos_id=2,
rnn_type='gru',
)
criterion = Perplexity(configs, vocab)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-04)
for i in range(3):
logits = model(DUMMY_TARGETS, teacher_forcing_ratio=1.0)
loss = criterion(logits, DUMMY_TARGETS[:, 1:])
loss.backward()
optimizer.step()
assert type(loss.item()) == float
for i in range(3):
logits = model(DUMMY_TARGETS, teacher_forcing_ratio=0.0)
loss = criterion(logits, DUMMY_TARGETS[:, 1:])
loss.backward()
optimizer.step()
assert type(loss.item()) == float
if __name__ == '__main__':
unittest.main()
|
mayan/apps/logging/tests/test_models.py | nattangwiwat/Mayan-EDMS-recitation | 336 | 11130379 | from mayan.apps.testing.tests.base import BaseTestCase
from ..models import ErrorLogPartitionEntry
from .mixins import LoggingTextMixin
class LoggingModelTestCase(LoggingTextMixin, BaseTestCase):
def setUp(self):
super().setUp()
self._create_error_log_test_object()
self._create_error_log_entry()
def test_entries_limit(self):
self.error_log.limit = 3
self.test_object.error_log.create(text='1')
self.test_object.error_log.create(text='2')
self.test_object.error_log.create(text='3')
self.test_object.error_log.create(text='4')
self.assertEqual(
list(self.test_object.error_log.values_list('text', flat=True)),
['2', '3', '4']
)
def test_object_deletion(self):
self.test_object.delete()
self.assertEqual(ErrorLogPartitionEntry.objects.count(), 0)
|
lib/django-1.4/django/contrib/gis/tests/distapp/models.py | MiCHiLU/google_appengine_sdk | 790 | 11130382 | from django.contrib.gis.db import models
class SouthTexasCity(models.Model):
"City model on projected coordinate system for South Texas."
name = models.CharField(max_length=30)
point = models.PointField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasCityFt(models.Model):
"Same City model as above, but U.S. survey feet are the units."
name = models.CharField(max_length=30)
point = models.PointField(srid=2278)
objects = models.GeoManager()
def __unicode__(self): return self.name
class AustraliaCity(models.Model):
"City model for Australia, using WGS84."
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class CensusZipcode(models.Model):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasZipcode(models.Model):
"Model for a few South Texas ZIP codes."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=32140, null=True)
objects = models.GeoManager()
def __unicode__(self): return self.name
class Interstate(models.Model):
"Geodetic model for U.S. Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasInterstate(models.Model):
"Projected model for South Texas Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
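# Illustrative sketch (not part of the original test app): the kind of distance query
# these models exist to exercise. The lookup point and the 100 km radius are assumptions.
def example_cities_within_100km(point):
    """Return South Texas cities within 100 km of the given GEOS point."""
    from django.contrib.gis.measure import D
    return SouthTexasCity.objects.filter(point__distance_lte=(point, D(km=100)))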
|
tests/basics/bytearray1.py | LabAixBidouille/micropython | 303 | 11130383 | print(bytearray(4))
a = bytearray([1, 2, 200])
print(type(a))
print(a[0], a[2])
print(a[-1])
print(a)
a[2] = 255
print(a[-1])
a.append(10)
print(len(a))
s = 0
for i in a:
s += i
print(s)
print(a[1:])
print(a[:-1])
print(a[2:3])
print(str(bytearray(b"123"), "utf-8"))
# Comparisons
print(bytearray([1]) == bytearray([1]))
print(bytearray([1]) == bytearray([2]))
print(bytearray([1]) == b"1")
print(b"1" == bytearray([1]))
print(bytearray() == bytearray())
# TODO: other comparisons
|
quantlib/test/test_sabr.py | bpmbank/pyql | 488 | 11130395 | import math
import unittest
from quantlib.time.api import Date
from quantlib.quotes import SimpleQuote
from quantlib.settings import Settings
from quantlib.termstructures.volatility.sabr_interpolated_smilesection \
import SabrInterpolatedSmileSection
from quantlib.termstructures.volatility.sabr import unsafe_sabr_volatility
import numpy as np
class SabrTestCase(unittest.TestCase):
def setUp(self):
option_date = Date(20, 9, 2017)
Settings().evaluation_date = Date(4, 8, 2017)
self.strikes = (np.array([50, 55, 57.5, 60, 62.5, 65, 67.5,
70, 75, 80, 85, 90, 95, 100]) * 1e-4).tolist()
vol = np.array([28.5, 31.6, 33.7, 36.1, 38.7, 41.5, 44.1,
46.5, 50.8, 54.4, 57.3, 59.8, 61.8, 63.6]) * 1e-2
vol_quotes = [SimpleQuote(q) for q in vol]
self.forward = SimpleQuote(58.71e-4)
atm_vol = (60-58.71)/1.5*33.7 + (58.71-57.5)/1.5*36.1
self.sabr_smile = SabrInterpolatedSmileSection(option_date, self.forward, self.strikes, False,
SimpleQuote(0.4), vol_quotes, 0.1, 1, 0.1, 0.5,
is_beta_fixed=True)
def test_params(self):
alpha, rho, nu = self.sabr_smile.alpha, self.sabr_smile.rho, self.sabr_smile.nu
self.assertTrue(alpha > 0)
self.assertTrue(rho > -1 and rho < 1)
self.assertTrue(nu > 0)
def test_errors(self):
self.assertTrue(self.sabr_smile.max_error > self.sabr_smile.rms_error)
def test_sabr_formula(self):
alpha, rho, nu = self.sabr_smile.alpha, self.sabr_smile.rho, self.sabr_smile.nu
for K in self.strikes:
self.assertEqual(self.sabr_smile.volatility(K),
unsafe_sabr_volatility(K,
self.forward.value,
self.sabr_smile.exercise_time,
alpha, 1., nu, rho))
def tearDown(self):
del self.sabr_smile
self.sabr_smile = None
if __name__ == "__main__":
unittest.main()
|
ai/src/tests/test_stateful.py | ScriptBox99/spiceai | 713 | 11130396 | from io import StringIO
import unittest
import pandas as pd
import numpy as np
from connector.stateful import StatefulConnector
from data_manager.base_manager import DataParam
from data_manager.time_series_manager import TimeSeriesDataManager
from proto.aiengine.v1 import aiengine_pb2
class StatefulConnectorTests(unittest.TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
self.original_csv = "time,baz\n5,0.0\n9,2.0\n20,2.0\n30,3.0\n40,4.0\n50,5.0"
self.original_data = pd.read_csv(StringIO(self.original_csv))
self.original_data["time"] = pd.to_datetime(self.original_data["time"], unit="s")
self.original_data = self.original_data.set_index("time")
self.granularity = pd.to_timedelta(10, unit="s")
self.epoch_time = pd.to_datetime(10, unit="s")
self.period = pd.to_timedelta(50, unit="s")
self.interval = pd.to_timedelta(20, unit="s")
def setUp(self):
self.data_manager = TimeSeriesDataManager(
param=DataParam(
epoch_time=self.epoch_time,
period_secs=self.period,
interval_secs=self.interval,
granularity_secs=self.granularity),
fields={
"foo": aiengine_pb2.FieldData(initializer=10.0),
"bar": aiengine_pb2.FieldData(initializer=5.0),
"baz": aiengine_pb2.FieldData(initializer=1.0),
},
action_rewards={
"foo_action": "reward = 1",
"bar_action": "reward = 1",
},
actions_order={
"foo_action": 0,
"bar_action": 1,
},
laws=["bar >= 0"],
external_reward_funcs="",
)
self.data_manager.merge_data(self.original_data)
self.data_manager.reset()
self.data_manager.start_training()
def tearDown(self):
self.data_manager.end_training()
def test_apply_action(self):
action_effects = {
"foo_action": "foo += 5\nbar -= 1",
"bar_action": "foo += baz",
}
stateful_connector = StatefulConnector(self.data_manager, action_effects)
current_window = self.data_manager.get_current_window()
is_valid = stateful_connector.apply_action(0, current_window)
self.assertTrue(is_valid)
index_to_check = pd.to_datetime(30, unit="s")
expected_bar = 4.0
expected_foo = 15.0
actual_bar = self.data_manager.massive_table_training_filled.loc[index_to_check]["bar"]
actual_foo = self.data_manager.massive_table_training_filled.loc[index_to_check]["foo"]
self.assertEqual(expected_bar, actual_bar)
self.assertEqual(expected_foo, actual_foo)
self.assertTrue(
np.isnan(
self.data_manager.massive_table_sparse.loc[index_to_check + self.granularity][
"bar"
]
)
)
def test_laws(self):
action_effects = {
"foo_action": "foo += 5\nbar -= 10",
"bar_action": "foo += baz",
}
stateful_connector = StatefulConnector(self.data_manager, action_effects)
current_window = self.data_manager.get_current_window()
# This should not be valid and not apply the update
is_valid = stateful_connector.apply_action(0, current_window)
self.assertFalse(is_valid)
index_to_check = pd.to_datetime(30, unit="s")
actual_bar = self.data_manager.massive_table_sparse.loc[index_to_check]["bar"]
actual_foo = self.data_manager.massive_table_sparse.loc[index_to_check]["foo"]
self.assertTrue(np.isnan(actual_bar))
self.assertTrue(np.isnan(actual_foo))
def test_is_calling_merge_row(self):
original_fill_table = self.data_manager._fill_table # pylint: disable=protected-access
def new_fill_table():
raise Exception("Should not call this on apply_action")
try:
self.data_manager._fill_table = new_fill_table # pylint: disable=protected-access
action_effects = {
"foo_action": "foo += 5\nbar -= 1",
"bar_action": "foo += baz",
}
stateful_connector = StatefulConnector(self.data_manager, action_effects)
current_window = self.data_manager.get_current_window()
is_valid = stateful_connector.apply_action(0, current_window)
self.assertTrue(is_valid)
finally:
self.data_manager._fill_table = original_fill_table # pylint: disable=protected-access
if __name__ == "__main__":
unittest.main()
|
cirq/interop/quirk/__init__.py | lilies/Cirq | 3,326 | 11130407 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to interoperating with Quirk, a drag-and-drop circuit simulator.
References:
https://github.com/strilanc/quirk - Quirk source code.
https://algassert.com/quirk - Live version of Quirk.
"""
# Imports from cells are only to ensure operation reprs work correctly.
from cirq.interop.quirk.cells import (
QuirkArithmeticOperation,
QuirkInputRotationOperation,
QuirkQubitPermutationGate,
)
from cirq.interop.quirk.url_to_circuit import (
quirk_json_to_circuit,
quirk_url_to_circuit,
)
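# Illustrative sketch (not part of the original module): parsing a small Quirk URL.
# The URL below encodes a Hadamard followed by a CNOT and is an assumption chosen
# for the example.
def _example_quirk_url_to_circuit():
    url = 'https://algassert.com/quirk#circuit={"cols":[["H"],["•","X"]]}'
    return quirk_url_to_circuit(url)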
|
tests/test_wire/test_named.py | leonardt/magma | 167 | 11130411 | from magma import *
from magma.testing import check_files_equal
def test_named1():
class Buf(Circuit):
name = "Buf"
io = IO(I=In(Bit), O=Out(Bit))
class main(Circuit):
name = "main"
io = IO(I=In(Bit), O=Out(Bit))
buf = Buf()
buf(I=io.I)
wire(buf.O, io.O)
compile("build/named1", main, output="verilog")
assert check_files_equal(__file__, "build/named1.v", "gold/named1.v")
def test_named2():
class And2(Circuit):
name = "And2"
io = IO(I0=In(Bit), I1=In(Bit), O=Out(Bit))
class main(Circuit):
name = "main"
io = IO(I=In(Bits[2]), O=Out(Bit))
a = And2()
a(I0=io.I[0], I1=io.I[1])
wire(a.O, io.O)
compile("build/named2a", main, output="verilog")
assert check_files_equal(__file__, "build/named2a.v", "gold/named2a.v")
def test_named3():
class And2(Circuit):
name = "And2"
io = IO(I0=In(Bit), I1=In(Bit), O=Out(Bit))
class main(Circuit):
name = "main"
io = IO(I=In(Bits[2]), O=Out(Bit))
a = And2()
a(I0=io.I[0])
a(I1=io.I[1])
wire(a.O, io.O)
compile("build/named2b", main, output="verilog")
assert check_files_equal(__file__, "build/named2b.v", "gold/named2b.v")
def test_named4():
class And2(Circuit):
name = "And2"
io = IO(I0=In(Bit), I1=In(Bit), O=Out(Bit))
class main(Circuit):
name = "main"
io = IO(I=In(Bits[2]), O=Out(Bit))
a = And2()
a(I1=io.I[1])
a(I0=io.I[0])
wire(a.O, io.O)
compile("build/named2c", main, output="verilog")
assert check_files_equal(__file__, "build/named2c.v", "gold/named2c.v")
|
examples/normals/simple.py | mitchkaden/meteostat-python | 133 | 11130414 | """
Example: Simple climate data access
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
import matplotlib.pyplot as plt
from meteostat import Normals
# Get normals
data = Normals('72407')
data = data.normalize().fetch()
# Plot chart
data.plot(y=['tavg', 'tmin', 'tmax'])
plt.show()
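# Illustrative variation (not part of the original example): plot normals for a
# specific reference period instead; the 1991-2020 period is an assumption.
period = Normals('72407', 1991, 2020)
period = period.normalize().fetch()
period.plot(y=['tavg', 'tmin', 'tmax'])
plt.show()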
|
src/Products/PageTemplates/tests/test_engine.py | rbanffy/Zope | 289 | 11130428 | import os
import unittest
from Testing.ZopeTestCase import ZopeTestCase
from Testing.ZopeTestCase.sandbox import Sandboxed
from .util import useChameleonEngine
path = os.path.dirname(__file__)
class TestPatches(Sandboxed, ZopeTestCase):
def afterSetUp(self):
useChameleonEngine()
def test_pagetemplate(self):
from Products.PageTemplates.PageTemplate import PageTemplate
template = PageTemplate()
# test rendering engine
with open(os.path.join(path, "simple.pt")) as fd:
data = fd.read()
template.write(data)
self.assertTrue('world' in template())
# test arguments
with open(os.path.join(path, "options.pt")) as fd:
data = fd.read()
template.write(data)
self.assertTrue('Hello world' in template(greeting='Hello world'))
def test_pagetemplatefile(self):
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
# test rendering engine
template = PageTemplateFile(os.path.join(path, "simple.pt"))
template = template.__of__(self.folder)
self.assertTrue('world' in template())
def test_pagetemplatefile_processing_instruction_skipped(self):
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
# test rendering engine
template = PageTemplateFile(os.path.join(path, "pi.pt"))
template = template.__of__(self.folder)
self.assertIn('world', template())
def test_zopepagetemplate(self):
from Products.PageTemplates.ZopePageTemplate import \
manage_addPageTemplate
template = manage_addPageTemplate(self.folder, 'test')
# aq-wrap before we proceed
template = template.__of__(self.folder)
# test rendering engine
with open(os.path.join(path, "simple.pt")) as fd:
data = fd.read()
template.write(data)
self.assertTrue('world' in template())
# test arguments
with open(os.path.join(path, "options.pt")) as fd:
data = fd.read()
template.write(data)
self.assertTrue('Hello world' in template(
greeting='Hello world'))
# test commit
import transaction
transaction.commit()
def test_zopepagetemplate_processing_instruction_skipped(self):
from Products.PageTemplates.ZopePageTemplate import \
manage_addPageTemplate
template = manage_addPageTemplate(self.folder, 'test')
# aq-wrap before we proceed
template = template.__of__(self.folder)
# test rendering engine
with open(os.path.join(path, "pi.pt")) as fd:
data = fd.read()
template.write(data)
self.assertIn('world', template())
def test_macros_access(self):
from Products.PageTemplates.ZopePageTemplate import \
manage_addPageTemplate
from zExceptions import Unauthorized
template = manage_addPageTemplate(self.folder, 'test')
# aq-wrap before we proceed
template = template.__of__(self.folder)
# test rendering engine
with open(os.path.join(path, "macros.pt")) as fd:
data = fd.read()
template.write(data)
try:
output = template()
raised = False
except Unauthorized:
raised = True
self.assertFalse(raised, 'Unauthorized exception raised')
self.assertIn('<i>bar</i><i>bar</i><i>bar</i>', output)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TestPatches),
))
|
src/winforms/toga_winforms/widgets/optioncontainer.py | luizoti/toga | 1,261 | 11130431 | from toga_winforms.libs import WinForms
from toga_winforms.window import WinFormsViewport
from .base import Widget
class OptionContainer(Widget):
def create(self):
self.native = WinForms.TabControl()
self.native.Selected += self.winforms_selected
def add_content(self, label, widget):
widget.viewport = WinFormsViewport(self.native, self)
widget.frame = self
# Add all children to the content widget.
for child in widget.interface.children:
child._impl.container = widget
item = WinForms.TabPage()
item.Text = label
# Enable AutoSize on the container to fill
# the available space in the OptionContainer.
widget.AutoSize = True
item.Controls.Add(widget.native)
self.native.TabPages.Add(item)
def remove_content(self, index):
tab_page = self.native.TabPages[index]
self.native.TabPages.Remove(self.native.TabPages[index])
tab_page.Dispose()
def set_on_select(self, handler):
pass
def set_option_enabled(self, index, enabled):
"""
Winforms documentation states that Enabled is not meaningful for this control.
Disabling option only disables the content of the tab, not the tab itself.
"""
self.native.TabPages[index].Enabled = enabled
def is_option_enabled(self, index):
return self.native.TabPages[index].Enabled
def set_option_label(self, index, value):
self.native.TabPages[index].Text = value
def get_option_label(self, index):
return self.native.TabPages[index].Text
def get_current_tab_index(self):
return self.native.SelectedIndex
def set_current_tab_index(self, current_tab_index):
self.native.SelectedIndex = current_tab_index
def winforms_selected(self, sender, event):
if self.interface.on_select:
self.interface.on_select(
self.interface,
option=self.interface.content[self.native.SelectedIndex]
)
def set_font(self, font):
if font:
self.native.Font = font.bind(self.interface.factory).native
|
opennmt/tests/inputter_test.py | OpenNMT/OpenNMT-tf | 1,363 | 11130432 | import gzip
import io
import os
import numpy as np
import tensorflow as tf
import yaml
from google.protobuf import text_format
from parameterized import parameterized
from tensorboard.plugins import projector
from opennmt import inputters, tokenizers
from opennmt.data import dataset as dataset_util
from opennmt.data import noise
from opennmt.inputters import inputter, record_inputter, text_inputter
from opennmt.layers import reducer
from opennmt.tests import test_util
from opennmt.utils.misc import count_lines, item_or_tuple
class InputterTest(tf.test.TestCase):
def testSaveEmbeddingMetadata(self):
log_dir = os.path.join(self.get_temp_dir(), "log")
if not os.path.exists(log_dir):
os.mkdir(log_dir)
def _create_vocab(vocab_filename, vocab_size=10):
vocab_file = os.path.join(self.get_temp_dir(), vocab_filename)
with open(vocab_file, mode="w") as vocab:
for i in range(vocab_size):
vocab.write("%d\n" % i)
return vocab_file
def _visualize(embedding, vocab_file, num_oov_buckets=1):
text_inputter.save_embeddings_metadata(
log_dir, embedding, vocab_file, num_oov_buckets=num_oov_buckets
)
projector_config = projector.ProjectorConfig()
projector_config_path = os.path.join(log_dir, "projector_config.pbtxt")
self.assertTrue(os.path.exists(projector_config_path))
with open(projector_config_path) as projector_config_file:
text_format.Merge(projector_config_file.read(), projector_config)
return projector_config
def _check_vocab(config, filename, expected_size):
self.assertEqual(config.metadata_path, filename)
self.assertEqual(
count_lines(os.path.join(log_dir, filename)), expected_size
)
# Register an embedding variable.
src_embedding = "model/src_emb/.ATTRIBUTES/VALUE"
src_vocab_file = _create_vocab("src_vocab.txt")
projector_config = _visualize(src_embedding, src_vocab_file)
self.assertEqual(1, len(projector_config.embeddings))
self.assertEqual(src_embedding, projector_config.embeddings[0].tensor_name)
_check_vocab(projector_config.embeddings[0], "model_src_emb.txt", 10 + 1)
# Register a second embedding variable.
tgt_embedding = "model/tgt_emb/.ATTRIBUTES/VALUE"
tgt_vocab_file = _create_vocab("tgt_vocab.txt")
projector_config = _visualize(tgt_embedding, tgt_vocab_file, num_oov_buckets=2)
self.assertEqual(2, len(projector_config.embeddings))
self.assertEqual(tgt_embedding, projector_config.embeddings[1].tensor_name)
_check_vocab(projector_config.embeddings[1], "model_tgt_emb.txt", 10 + 2)
# Update an existing variable.
src_vocab_file = _create_vocab("src_vocab.txt", vocab_size=20)
projector_config = _visualize(src_embedding, src_vocab_file)
self.assertEqual(2, len(projector_config.embeddings))
self.assertEqual(src_embedding, projector_config.embeddings[0].tensor_name)
_check_vocab(projector_config.embeddings[0], "model_src_emb.txt", 20 + 1)
def _makeTextFile(self, name, lines, compress=False):
path = os.path.join(self.get_temp_dir(), name)
if compress:
path = "%s.gz" % path
with (gzip if compress else io).open(path, mode="wt", encoding="utf-8") as f:
for line in lines:
f.write("%s\n" % line)
return path
def _makeEmbeddingsFile(self, vectors, name="embedding", header=False):
path = os.path.join(self.get_temp_dir(), name)
with open(path, "w") as embs:
if header:
embs.write("%d %d\n" % (len(vectors), len(vectors[0][1])))
for word, vector in vectors:
embs.write("%s %s\n" % (word, " ".join(str(v) for v in vector)))
return path
def testPretrainedEmbeddingsLoading(self):
vocab_file = self._makeTextFile("vocab.txt", ["Toto", "tOTO", "tata", "tete"])
embedding_file = self._makeEmbeddingsFile(
[("toto", [1, 1]), ("titi", [2, 2]), ("tata", [3, 3])]
)
embeddings = text_inputter.load_pretrained_embeddings(
embedding_file,
vocab_file,
num_oov_buckets=1,
with_header=False,
case_insensitive_embeddings=True,
)
self.assertAllEqual([5, 2], embeddings.shape)
self.assertAllEqual([1, 1], embeddings[0])
self.assertAllEqual([1, 1], embeddings[1])
self.assertAllEqual([3, 3], embeddings[2])
embeddings = text_inputter.load_pretrained_embeddings(
embedding_file,
vocab_file,
num_oov_buckets=2,
with_header=False,
case_insensitive_embeddings=False,
)
self.assertAllEqual([6, 2], embeddings.shape)
self.assertAllEqual([3, 3], embeddings[2])
def testPretrainedEmbeddingsWithHeaderLoading(self):
vocab_file = self._makeTextFile("vocab.txt", ["Toto", "tOTO", "tata", "tete"])
embedding_file = self._makeEmbeddingsFile(
[("toto", [1, 1]), ("titi", [2, 2]), ("tata", [3, 3])], header=True
)
embeddings = text_inputter.load_pretrained_embeddings(
embedding_file,
vocab_file,
num_oov_buckets=1,
case_insensitive_embeddings=True,
)
self.assertAllEqual([5, 2], embeddings.shape)
self.assertAllEqual([1, 1], embeddings[0])
self.assertAllEqual([1, 1], embeddings[1])
self.assertAllEqual([3, 3], embeddings[2])
@parameterized.expand(
[
[[3, 4], 2, 1, 2, [1, 3, 4, 2], 4],
[[[3, 4], [5, 6]], [2, 1], 1, None, [[1, 3, 4], [1, 5, 0]], [3, 2]],
[[[3, 4], [5, 6]], [2, 1], None, 2, [[3, 4, 2], [5, 2, 0]], [3, 2]],
]
)
def testAddSequenceControls(
self, ids, length, start_id, end_id, expected_ids, expected_length
):
ids = tf.constant(ids, dtype=tf.int64)
length = tf.constant(length, dtype=tf.int32)
ids, length = inputters.add_sequence_controls(
ids, length, start_id=start_id, end_id=end_id
)
self.assertAllEqual(self.evaluate(ids), expected_ids)
self.assertAllEqual(self.evaluate(length), expected_length)
def testAddSequenceControlsRagged(self):
ids = tf.RaggedTensor.from_tensor([[4, 5, 6], [3, 4, 0]], padding=0)
ids = inputters.add_sequence_controls(ids, start_id=1, end_id=2)
self.assertAllEqual(ids.to_list(), [[1, 4, 5, 6, 2], [1, 3, 4, 2]])
def _checkFeatures(self, features, expected_shapes):
for name, expected_shape in expected_shapes.items():
self.assertIn(name, features)
self.assertTrue(features[name].shape.is_compatible_with(expected_shape))
def _testServing(self, inputter):
@tf.function(input_signature=(inputter.input_signature(),))
def _serving_fun(features):
features = inputter.make_features(features=features.copy())
inputs = inputter(features)
return inputs
_serving_fun.get_concrete_function()
def _makeDataset(
self, inputter, data_file, data_config=None, dataset_size=1, shapes=None
):
if data_config is not None:
inputter.initialize(data_config)
self.assertEqual(inputter.get_dataset_size(data_file), dataset_size)
dataset = inputter.make_dataset(data_file)
eager_features = inputter.make_features(iter(dataset).next(), training=True)
eager_features = tf.nest.map_structure(
lambda t: tf.expand_dims(t, 0), eager_features
)
dataset = dataset.map(
lambda *arg: inputter.make_features(item_or_tuple(arg), training=True)
)
dataset = dataset.apply(dataset_util.batch_dataset(1))
features = iter(dataset).next()
if shapes is not None:
self._checkFeatures(features, shapes)
self._checkFeatures(eager_features, shapes)
keep = inputter.keep_for_training(features)
self.assertIs(keep.dtype, tf.bool)
inputs = inputter(features, training=True)
if not isinstance(inputter, inputters.ExampleInputter):
self._testServing(inputter)
return self.evaluate((features, inputs))
def testWordEmbedder(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
embedder = text_inputter.WordEmbedder(embedding_size=10)
features, transformed = self._makeDataset(
embedder,
data_file,
data_config={"vocabulary": vocab_file},
shapes={"tokens": [None, None], "ids": [None, None], "length": [None]},
)
self.assertAllEqual([3], features["length"])
self.assertAllEqual([[2, 1, 4]], features["ids"])
self.assertAllEqual([1, 3, 10], transformed.shape)
oov_tokens = embedder.get_oov_tokens(features)
self.assertListEqual(oov_tokens.numpy().flatten().tolist(), [b"!"])
def testWordEmbedderForDecoder(self):
vocab_file = test_util.make_vocab(
os.path.join(self.get_temp_dir(), "vocab.txt"),
["the", "world", "hello", "toto"],
)
embedder = text_inputter.WordEmbedder(embedding_size=10)
embedder.set_decoder_mode(mark_start=True, mark_end=True)
embedder.initialize({"vocabulary": vocab_file})
features = embedder.make_features(tf.constant("hello world !"))
self.assertEqual(features["length"], 4)
self.assertEqual(embedder.get_length(features, ignore_special_tokens=True), 3)
self.assertAllEqual(features["ids"], [1, 5, 4, 7])
self.assertAllEqual(features["ids_out"], [5, 4, 7, 2])
oov_tokens = embedder.get_oov_tokens(features)
self.assertListEqual(oov_tokens.numpy().flatten().tolist(), [b"!"])
def testWordEmbedderWithTokenizer(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "■"])
data_file = self._makeTextFile("data.txt", ["hello world!"])
tokenization = {
"mode": "aggressive",
"joiner_annotate": True,
"joiner_new": True,
}
tokenization_config_path = os.path.join(self.get_temp_dir(), "tok.yml")
with open(tokenization_config_path, "w") as tokenization_config_file:
yaml.dump(tokenization, tokenization_config_file)
embedder = text_inputter.WordEmbedder(embedding_size=10)
data_config = {
"vocabulary": vocab_file,
"tokenization": tokenization_config_path,
}
features, transformed = self._makeDataset(
embedder,
data_file,
data_config=data_config,
shapes={"tokens": [None, None], "ids": [None, None], "length": [None]},
)
self.assertAllEqual([4], features["length"])
self.assertAllEqual([[2, 1, 3, 4]], features["ids"])
def testWordEmbedderWithInGraphTokenizer(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "■"])
embedder = text_inputter.WordEmbedder(embedding_size=10)
data_config = {
"vocabulary": vocab_file,
"tokenization": {"type": "CharacterTokenizer"},
}
embedder.initialize(data_config)
self.assertIn("text", embedder.input_signature())
self._testServing(embedder)
def testWordEmbedderWithCompression(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "■"])
data_file = self._makeTextFile(
"data.txt", ["hello world !", "how are you ?"], compress=True
)
inputter = text_inputter.WordEmbedder(embedding_size=10)
inputter.initialize(dict(vocabulary=vocab_file))
dataset = inputter.make_inference_dataset(data_file, batch_size=1)
iterator = iter(dataset)
self.assertAllEqual(
next(iterator)["tokens"].numpy()[0], [b"hello", b"world", b"!"]
)
def testWordEmbedderWithNoise(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
noiser = noise.WordNoiser(noises=[noise.WordOmission(1)])
embedder = text_inputter.WordEmbedder(embedding_size=10)
embedder.set_noise(noiser, in_place=False)
expected_shapes = {
"tokens": [None, None],
"ids": [None, None],
"length": [None],
"noisy_tokens": [None, None],
"noisy_ids": [None, None],
"noisy_length": [None],
}
features, transformed = self._makeDataset(
embedder,
data_file,
data_config={"vocabulary": vocab_file},
shapes=expected_shapes,
)
self.assertEqual(features["noisy_length"][0], features["length"][0] - 1)
@parameterized.expand([[1], [0]])
def testWordEmbedderWithInPlaceNoise(self, probability):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
noiser = noise.WordNoiser(noises=[noise.WordOmission(1)])
embedder = text_inputter.WordEmbedder(embedding_size=10)
embedder.set_noise(noiser, probability=probability)
features, transformed = self._makeDataset(
embedder,
data_file,
data_config={"vocabulary": vocab_file},
shapes={"tokens": [None, None], "ids": [None, None], "length": [None]},
)
self.assertEqual(features["length"][0], 3 if probability == 0 else 2)
def testWordEmbedderWithPretrainedEmbeddings(self):
data_file = self._makeTextFile("data.txt", ["hello world !"])
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
embedding_file = self._makeEmbeddingsFile(
[("hello", [1, 1]), ("world", [2, 2]), ("toto", [3, 3])]
)
embedder = text_inputter.WordEmbedder()
data = {
"vocabulary": vocab_file,
"embedding": {"path": embedding_file, "with_header": False},
}
features, transformed = self._makeDataset(
embedder,
data_file,
data_config=data,
shapes={"tokens": [None, None], "ids": [None, None], "length": [None]},
)
self.assertAllEqual([1, 1], transformed[0][0])
self.assertAllEqual([2, 2], transformed[0][1])
def testWordEmbedderMissingInitialization(self):
embedder = text_inputter.WordEmbedder()
with self.assertRaisesRegex(RuntimeError, "initialize"):
embedder.input_signature()
with self.assertRaisesRegex(RuntimeError, "initialize"):
embedder.make_features("Hello world !")
def testWordEmbedderBatchElement(self):
vocab_file = self._makeTextFile(
"vocab.txt", ["<blank>", "<s>", "</s>"] + list(map(str, range(10)))
)
embedder = text_inputter.WordEmbedder(32)
embedder.initialize(dict(vocabulary=vocab_file))
features = embedder.make_features(["1 2 3", "1 2 3 4"])
self.assertAllEqual(features["length"], [3, 4])
self.assertAllEqual(features["ids"], [[4, 5, 6, 0], [4, 5, 6, 7]])
embedder.set_decoder_mode(mark_start=True, mark_end=True)
features = embedder.make_features(["1 2 3", "1 2 3 4"])
self.assertAllEqual(features["length"], [4, 5])
self.assertAllEqual(features["ids"], [[1, 4, 5, 6, 0], [1, 4, 5, 6, 7]])
self.assertAllEqual(features["ids_out"], [[4, 5, 6, 2, 0], [4, 5, 6, 7, 2]])
def testCharConvEmbedder(self):
vocab_file = self._makeTextFile("vocab.txt", ["h", "e", "l", "w", "o"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
embedder = text_inputter.CharConvEmbedder(10, 5)
features, transformed = self._makeDataset(
embedder,
data_file,
data_config={"vocabulary": vocab_file},
shapes={"char_ids": [None, None, None], "length": [None]},
)
self.assertAllEqual([3], features["length"])
self.assertAllEqual(
[[[0, 1, 2, 2, 4], [3, 4, 5, 2, 5], [5, 5, 5, 5, 5]]], features["char_ids"]
)
self.assertAllEqual([1, 3, 5], transformed.shape)
def testCharRNNEmbedder(self):
vocab_file = self._makeTextFile("vocab.txt", ["h", "e", "l", "w", "o"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
embedder = text_inputter.CharRNNEmbedder(10, 5)
features, transformed = self._makeDataset(
embedder,
data_file,
data_config={"vocabulary": vocab_file},
shapes={"char_ids": [None, None, None], "length": [None]},
)
self.assertAllEqual([1, 3, 5], transformed.shape)
def testParallelInputter(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
data_files = [data_file, data_file]
parallel_inputter = inputter.ParallelInputter(
[
text_inputter.WordEmbedder(embedding_size=10),
text_inputter.WordEmbedder(embedding_size=5),
]
)
self.assertEqual(parallel_inputter.num_outputs, 2)
features, transformed = self._makeDataset(
parallel_inputter,
data_files,
data_config={"1_vocabulary": vocab_file, "2_vocabulary": vocab_file},
shapes={
"inputter_0_ids": [None, None],
"inputter_0_length": [None],
"inputter_1_ids": [None, None],
"inputter_1_length": [None],
},
)
self.assertEqual(2, len(parallel_inputter.get_length(features)))
self.assertEqual(2, len(transformed))
self.assertAllEqual([1, 3, 10], transformed[0].shape)
self.assertAllEqual([1, 3, 5], transformed[1].shape)
def testParallelInputterShareParameters(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
data_config = {"1_vocabulary": vocab_file, "2_vocabulary": vocab_file}
inputters = [
text_inputter.WordEmbedder(embedding_size=10),
text_inputter.WordEmbedder(embedding_size=10),
]
parallel_inputter = inputter.ParallelInputter(inputters, share_parameters=True)
parallel_inputter.initialize(data_config)
parallel_inputter.build(None)
self.assertEqual(inputters[0].embedding.ref(), inputters[1].embedding.ref())
def testNestedParallelInputterShareParameters(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
data_config = {
"1_1_vocabulary": vocab_file,
"1_2_vocabulary": vocab_file,
"2_vocabulary": vocab_file,
}
source_inputters = [
text_inputter.WordEmbedder(embedding_size=10),
text_inputter.WordEmbedder(embedding_size=10),
]
target_inputter = text_inputter.WordEmbedder(embedding_size=10)
inputters = [
inputter.ParallelInputter(source_inputters, share_parameters=True),
target_inputter,
]
parallel_inputter = inputter.ParallelInputter(inputters, share_parameters=True)
parallel_inputter.initialize(data_config)
parallel_inputter.build(None)
self.assertEqual(
source_inputters[0].embedding.ref(), target_inputter.embedding.ref()
)
self.assertEqual(
source_inputters[1].embedding.ref(), target_inputter.embedding.ref()
)
def testNestedInputtersWithFlatDataFiles(self):
inputters = inputter.ParallelInputter(
[
record_inputter.SequenceRecordInputter(10),
record_inputter.SequenceRecordInputter(10),
],
reducer=reducer.SumReducer(),
)
inputters = inputter.ParallelInputter(
[
record_inputter.SequenceRecordInputter(10),
inputters,
],
reducer=reducer.ConcatReducer(),
)
self.assertListEqual(inputters._structure(), [None, [None, None]])
empty_file = os.path.join(self.get_temp_dir(), "test.txt")
with open(empty_file, "w"):
pass
with self.assertRaises(ValueError):
inputters.make_inference_dataset([empty_file, empty_file], batch_size=2)
inputters.make_inference_dataset(
[empty_file, empty_file, empty_file], batch_size=2
)
def testExampleInputter(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
source_inputter = text_inputter.WordEmbedder(embedding_size=10)
target_inputter = text_inputter.WordEmbedder(embedding_size=10)
example_inputter = inputter.ExampleInputter(source_inputter, target_inputter)
self.assertEqual(example_inputter.num_outputs, 2)
features, transformed = self._makeDataset(
example_inputter,
[data_file, data_file],
data_config={
"source_vocabulary": vocab_file,
"target_vocabulary": vocab_file,
},
)
self.assertIsInstance(features, tuple)
self.assertEqual(len(features), 2)
self.assertEqual(len(transformed), 2)
features, labels = features
for field in ("ids", "length", "tokens"):
self.assertIn(field, features)
for field in ("ids", "length", "tokens"):
self.assertIn(field, labels)
def testExampleInputterFiltering(self):
vocab_file = self._makeTextFile("vocab.txt", ["a", "b", "c", "d"])
features_file = self._makeTextFile(
"features.txt", ["a b c d", "a b c", "a a c", "a"]
)
labels_file = self._makeTextFile(
"labels.txt", ["a b c d", "a", "a a c d d", ""]
)
example_inputter = inputter.ExampleInputter(
text_inputter.WordEmbedder(embedding_size=10),
text_inputter.WordEmbedder(embedding_size=10),
)
example_inputter.initialize(
{"source_vocabulary": vocab_file, "target_vocabulary": vocab_file}
)
dataset = example_inputter.make_training_dataset(
features_file,
labels_file,
batch_size=1,
maximum_features_length=3,
maximum_labels_length=4,
single_pass=True,
)
examples = list(iter(dataset))
self.assertLen(examples, 1)
self.assertAllEqual(examples[0][0]["ids"], [[0, 1, 2]])
self.assertAllEqual(examples[0][1]["ids"], [[0]])
def testWeightedDataset(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
source_inputter = text_inputter.WordEmbedder(embedding_size=10)
target_inputter = text_inputter.WordEmbedder(embedding_size=10)
example_inputter = inputter.ExampleInputter(source_inputter, target_inputter)
example_inputter.initialize(
{"source_vocabulary": vocab_file, "target_vocabulary": vocab_file}
)
with self.assertRaisesRegex(ValueError, "same number"):
example_inputter.make_training_dataset(
[data_file, data_file], [data_file], batch_size=16
)
with self.assertRaisesRegex(ValueError, "expected to match"):
example_inputter.make_training_dataset(
[data_file, data_file],
[data_file, data_file],
batch_size=16,
weights=[0.5],
)
dataset = example_inputter.make_training_dataset(
[data_file, data_file], [data_file, data_file], batch_size=16
)
self.assertIsInstance(dataset, tf.data.Dataset)
dataset = example_inputter.make_training_dataset(
[data_file, data_file],
[data_file, data_file],
batch_size=16,
weights=[0.2, 0.8],
)
self.assertIsInstance(dataset, tf.data.Dataset)
def testBatchAutotuneDataset(self):
vocab_file = self._makeTextFile("vocab.txt", ["1", "2", "3", "4"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
source_inputter = text_inputter.WordEmbedder(embedding_size=10)
target_inputter = text_inputter.WordEmbedder(embedding_size=10)
target_inputter.set_decoder_mode(mark_start=True, mark_end=True)
example_inputter = inputter.ExampleInputter(source_inputter, target_inputter)
example_inputter.initialize(
{"source_vocabulary": vocab_file, "target_vocabulary": vocab_file}
)
dataset = example_inputter.make_training_dataset(
data_file,
data_file,
batch_size=1024,
batch_type="tokens",
maximum_features_length=100,
maximum_labels_length=120,
batch_autotune_mode=True,
)
source, target = next(iter(dataset))
self.assertListEqual(source["ids"].shape.as_list(), [8, 100])
self.assertListEqual(target["ids"].shape.as_list(), [8, 120])
self.assertListEqual(target["ids_out"].shape.as_list(), [8, 120])
def testBatchAutotuneDatasetMultiSource(self):
vocab_file = self._makeTextFile("vocab.txt", ["1", "2", "3", "4"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
source_inputter = inputter.ParallelInputter(
[
text_inputter.WordEmbedder(embedding_size=10),
text_inputter.WordEmbedder(embedding_size=10),
]
)
target_inputter = text_inputter.WordEmbedder(embedding_size=10)
target_inputter.set_decoder_mode(mark_start=True, mark_end=True)
example_inputter = inputter.ExampleInputter(source_inputter, target_inputter)
example_inputter.initialize(
{
"source_1_vocabulary": vocab_file,
"source_2_vocabulary": vocab_file,
"target_vocabulary": vocab_file,
}
)
dataset = example_inputter.make_training_dataset(
[data_file, data_file],
data_file,
batch_size=1024,
batch_type="tokens",
maximum_features_length=[100, 110],
maximum_labels_length=120,
batch_autotune_mode=True,
)
source, target = next(iter(dataset))
self.assertListEqual(source["inputter_0_ids"].shape.as_list(), [8, 100])
self.assertListEqual(source["inputter_1_ids"].shape.as_list(), [8, 110])
self.assertListEqual(target["ids"].shape.as_list(), [8, 120])
self.assertListEqual(target["ids_out"].shape.as_list(), [8, 120])
def testExampleInputterAsset(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
source_inputter = text_inputter.WordEmbedder(embedding_size=10)
target_inputter = text_inputter.WordEmbedder(embedding_size=10)
example_inputter = inputter.ExampleInputter(source_inputter, target_inputter)
example_inputter.initialize(
{
"source_vocabulary": vocab_file,
"target_vocabulary": vocab_file,
"source_tokenization": {"mode": "conservative"},
}
)
self.assertIsInstance(source_inputter.tokenizer, tokenizers.OpenNMTTokenizer)
self.assertTrue(example_inputter.has_prepare_step())
asset_dir = self.get_temp_dir()
example_inputter.export_assets(asset_dir)
self.assertIn("source_tokenizer_config.yml", set(os.listdir(asset_dir)))
def testMixedInputter(self):
vocab_file = self._makeTextFile("vocab.txt", ["the", "world", "hello", "toto"])
vocab_alt_file = self._makeTextFile("vocab_alt.txt", ["h", "e", "l", "w", "o"])
data_file = self._makeTextFile("data.txt", ["hello world !"])
mixed_inputter = inputter.MixedInputter(
[
text_inputter.WordEmbedder(embedding_size=10),
text_inputter.CharConvEmbedder(10, 5),
],
reducer=reducer.ConcatReducer(),
)
self.assertEqual(mixed_inputter.num_outputs, 1)
features, transformed = self._makeDataset(
mixed_inputter,
data_file,
data_config={"1_vocabulary": vocab_file, "2_vocabulary": vocab_alt_file},
shapes={
"char_ids": [None, None, None],
"ids": [None, None],
"length": [None],
},
)
self.assertAllEqual([1, 3, 15], transformed.shape)
def testSequenceRecord(self):
vector = np.array([[0.2, 0.3], [0.4, 0.5]], dtype=np.float32)
record_file = os.path.join(self.get_temp_dir(), "data.records")
record_inputter.create_sequence_records([vector], record_file)
inputter = record_inputter.SequenceRecordInputter(2)
features, transformed = self._makeDataset(
inputter,
record_file,
dataset_size=None,
shapes={"tensor": [None, None, 2], "length": [None]},
)
self.assertEqual([2], features["length"])
self.assertAllEqual([vector], features["tensor"])
self.assertAllEqual([vector], transformed)
def testSequenceRecordBatch(self):
vectors = [
np.random.rand(3, 2),
np.random.rand(6, 2),
np.random.rand(1, 2),
]
record_file = os.path.join(self.get_temp_dir(), "data.records")
record_inputter.create_sequence_records(vectors, record_file)
inputter = record_inputter.SequenceRecordInputter(2)
dataset = inputter.make_dataset(record_file)
dataset = dataset.batch(3)
dataset = dataset.map(inputter.make_features)
features = next(iter(dataset))
lengths = features["length"]
tensors = features["tensor"]
self.assertAllEqual(lengths, [3, 6, 1])
for length, tensor, expected_vector in zip(lengths, tensors, vectors):
self.assertAllClose(tensor[:length], expected_vector)
def testSequenceRecordWithCompression(self):
vector = np.array([[0.2, 0.3], [0.4, 0.5]], dtype=np.float32)
compression = "GZIP"
record_file = os.path.join(self.get_temp_dir(), "data.records")
record_file = record_inputter.create_sequence_records(
[vector], record_file, compression=compression
)
inputter = record_inputter.SequenceRecordInputter(2)
dataset = inputter.make_inference_dataset(record_file, batch_size=1)
iterator = iter(dataset)
self.assertAllEqual(next(iterator)["tensor"].numpy()[0], vector)
if __name__ == "__main__":
tf.test.main()
|
pylearn2/datasets/tests/test_four_regions.py | ikervazquezlopez/Pylearn2 | 2,045 | 11130457 | import numpy as np
from pylearn2.datasets.four_regions import FourRegions
def test_four_regions():
dataset = FourRegions(5000)
X = dataset.get_design_matrix()
np.testing.assert_(((X < 1.) & (X > -1.)).all())
y = dataset.get_targets()
np.testing.assert_equal(np.unique(y), [0, 1, 2, 3])
|
samples/migrateADCGen1/mappers/adlsg1.py | daniel-dqsdatalabs/pyapacheatlas | 104 | 11130494 | import sys
sys.path.append("./")
from .assetmapper import AssetMapper
from urllib.parse import urlparse
class ADLSGen1Directory(AssetMapper):
def __init__(self, asset, termMap, typeName='azure_datalake_gen1_path', columnTypeName='column'):
super().__init__(asset, termMap, typeName=typeName, columnTypeName=columnTypeName)
def qualified_name(self):
url = self.asset["properties"].get("dsl", {}).get("address", {}).get("url", None)
parsed = urlparse(url)
url = parsed.geturl().replace("https", "adl", 1)
return f"{url}"
def column_qualified_name_pattern(self, columnName, **kwargs):
return columnName
# Override
def partial_column_updates(self):
return []
class ADLSGen1DataLake(AssetMapper):
def __init__(self, asset, termMap, typeName='azure_datalake_gen1_path', columnTypeName='column'):
super().__init__(asset, termMap, typeName=typeName, columnTypeName=columnTypeName)
def qualified_name(self):
url = self.asset["properties"].get("dsl", {}).get("address", {}).get("url", None)
parsed = urlparse(url)
url = parsed.geturl().replace("https", "adl", 1)
if url[-1] == "/":
url = url[:-1]
return f"{url}"
def column_qualified_name_pattern(self, columnName, **kwargs):
return columnName
# Override
def partial_column_updates(self):
return []
|
tfprob/gan/__init__.py | AlexBlack2202/EigenGAN-Tensorflow | 581 | 11130502 |
from tfprob.gan.gradient_penalty import *
from tfprob.gan.loss import *
|
tools/third_party/pywebsocket3/mod_pywebsocket/standalone.py | meyerweb/wpt | 14,668 | 11130503 |
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Standalone WebSocket server.
Use this file to launch pywebsocket as a standalone server.
BASIC USAGE
===========
Go to the src directory and run
$ python mod_pywebsocket/standalone.py [-p <ws_port>]
[-w <websock_handlers>]
[-d <document_root>]
<ws_port> is the port number to use for ws:// connection.
<document_root> is the path to the root directory of HTML files.
<websock_handlers> is the path to the root directory of WebSocket handlers.
If not specified, <document_root> will be used. See __init__.py (or
run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
For more detail and other options, run
$ python mod_pywebsocket/standalone.py --help
or see _build_option_parser method below.
For troubleshooting, adding "--log_level debug" might help you.
TRY DEMO
========
Go to the src directory and run standalone.py with the -d option to set the
document root to the directory containing example HTMLs and handlers like this:
$ cd src
$ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example
to launch pywebsocket with the sample handler and html on port 80. Open
http://localhost/console.html, click the connect button, type something into
the text box next to the send button and click the send button. If everything
is working, you'll see the message you typed echoed by the server.
USING TLS
=========
To run the standalone server with TLS support, run it with -t, -k, and -c
options. When TLS is enabled, the standalone server accepts only TLS connections.
Note that when the ssl module is used and the key/cert location is incorrect,
the TLS connection silently fails, while pyOpenSSL fails on startup.
Example:
$ PYTHONPATH=. python mod_pywebsocket/standalone.py \
-d example \
-p 10443 \
-t \
-c ../test/cert/cert.pem \
-k ../test/cert/key.pem \
Note that when passing a relative path to the -c and -k options, it will be resolved
using the document root directory as the base.
USING CLIENT AUTHENTICATION
===========================
To run the standalone server with TLS client authentication support, run it with
--tls-client-auth and --tls-client-ca options in addition to ones required for
TLS support.
Example:
$ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example -p 10443 -t \
-c ../test/cert/cert.pem -k ../test/cert/key.pem \
--tls-client-auth \
--tls-client-ca=../test/cert/cacert.pem
Note that when passing a relative path to the --tls-client-ca option, it will be
resolved using the document root directory as the base.
CONFIGURATION FILE
==================
You can also write a configuration file and use it by specifying its path with
the --config option. The configuration file follows the format of the Python
ConfigParser library. The name of each entry must be the long argument name.
E.g. to set the log level to debug, add the following line:
log_level=debug
For options which don't take a value, add some placeholder value. E.g. for the
--tls option, add the following line:
tls=True
Note that tls will be enabled even if you write tls=False, because the value
part is only a placeholder.
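For example, a minimal configuration file might look like the following (the
section name must be "pywebsocket", as read by _parse_args_and_config below;
the option values shown here are only illustrative):
[pywebsocket]
port=10080
document_root=example
log_level=debug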
When both a command line argument and a configuration file entry are set for
the same configuration item, the command line value will override one in the
configuration file.
THREADING
=========
This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
used for each request.
SECURITY WARNING
================
This uses CGIHTTPServer, which is not secure.
It may execute arbitrary Python code or external programs. It should not be
used outside a firewall.
"""
from __future__ import absolute_import
from six.moves import configparser
import base64
import logging
import argparse
import os
import six
import sys
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket import server_util
from mod_pywebsocket.websocket_server import WebSocketServer
_DEFAULT_LOG_MAX_BYTES = 1024 * 256
_DEFAULT_LOG_BACKUP_COUNT = 5
_DEFAULT_REQUEST_QUEUE_SIZE = 128
def _build_option_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config',
dest='config_file',
type=six.text_type,
default=None,
help=('Path to configuration file. See the file comment '
'at the top of this file for the configuration '
'file format'))
parser.add_argument('-H',
'--server-host',
'--server_host',
dest='server_host',
default='',
help='server hostname to listen to')
parser.add_argument('-V',
'--validation-host',
'--validation_host',
dest='validation_host',
default=None,
help='server hostname to validate in absolute path.')
parser.add_argument('-p',
'--port',
dest='port',
type=int,
default=common.DEFAULT_WEB_SOCKET_PORT,
help='port to listen to')
parser.add_argument('-P',
'--validation-port',
'--validation_port',
dest='validation_port',
type=int,
default=None,
help='server port to validate in absolute path.')
parser.add_argument(
'-w',
'--websock-handlers',
'--websock_handlers',
dest='websock_handlers',
default='.',
help=('The root directory of WebSocket handler files. '
'If the path is relative, --document-root is used '
'as the base.'))
parser.add_argument('-m',
'--websock-handlers-map-file',
'--websock_handlers_map_file',
dest='websock_handlers_map_file',
default=None,
help=('WebSocket handlers map file. '
'Each line consists of alias_resource_path and '
'existing_resource_path, separated by spaces.'))
parser.add_argument('-s',
'--scan-dir',
'--scan_dir',
dest='scan_dir',
default=None,
help=('Must be a directory under --websock-handlers. '
'Only handlers under this directory are scanned '
'and registered to the server. '
'Useful for saving scan time when the handler '
'root directory contains lots of files that are '
'not handler file or are handler files but you '
'don\'t want them to be registered. '))
parser.add_argument(
'--allow-handlers-outside-root-dir',
'--allow_handlers_outside_root_dir',
dest='allow_handlers_outside_root_dir',
action='store_true',
default=False,
help=('Scans WebSocket handlers even if their canonical '
'path is not under --websock-handlers.'))
parser.add_argument('-d',
'--document-root',
'--document_root',
dest='document_root',
default='.',
help='Document root directory.')
parser.add_argument('-x',
'--cgi-paths',
'--cgi_paths',
dest='cgi_paths',
default=None,
help=('CGI paths relative to document_root. '
'Comma-separated. (e.g. -x /cgi,/htbin) '
'Files under document_root/cgi_path are handled '
'as CGI programs. Must be executable.'))
parser.add_argument('-t',
'--tls',
dest='use_tls',
action='store_true',
default=False,
help='use TLS (wss://)')
parser.add_argument('-k',
'--private-key',
'--private_key',
dest='private_key',
default='',
help='TLS private key file.')
parser.add_argument('-c',
'--certificate',
dest='certificate',
default='',
help='TLS certificate file.')
parser.add_argument('--tls-client-auth',
dest='tls_client_auth',
action='store_true',
default=False,
help='Requests TLS client auth on every connection.')
parser.add_argument('--tls-client-cert-optional',
dest='tls_client_cert_optional',
action='store_true',
default=False,
help=('Makes client certificate optional even though '
'TLS client auth is enabled.'))
parser.add_argument('--tls-client-ca',
dest='tls_client_ca',
default='',
help=('Specifies a pem file which contains a set of '
'concatenated CA certificates which are used to '
'validate certificates passed from clients'))
parser.add_argument('--basic-auth',
dest='use_basic_auth',
action='store_true',
default=False,
help='Requires Basic authentication.')
parser.add_argument(
'--basic-auth-credential',
dest='basic_auth_credential',
default='test:test',
help='Specifies the credential of basic authentication '
'by username:password pair (e.g. test:test).')
parser.add_argument('-l',
'--log-file',
'--log_file',
dest='log_file',
default='',
help='Log file.')
# Custom log level:
# - FINE: Prints status of each frame processing step
parser.add_argument('--log-level',
'--log_level',
type=six.text_type,
dest='log_level',
default='warn',
choices=[
'fine', 'debug', 'info', 'warning', 'warn',
'error', 'critical'
],
help='Log level.')
parser.add_argument(
'--deflate-log-level',
'--deflate_log_level',
type=six.text_type,
dest='deflate_log_level',
default='warn',
choices=['debug', 'info', 'warning', 'warn', 'error', 'critical'],
help='Log level for _Deflater and _Inflater.')
parser.add_argument('--thread-monitor-interval-in-sec',
'--thread_monitor_interval_in_sec',
dest='thread_monitor_interval_in_sec',
type=int,
default=-1,
help=('If positive integer is specified, run a thread '
'monitor to show the status of server threads '
'periodically in the specified interval in '
'seconds. If a non-positive integer is specified, '
'disable the thread monitor.'))
parser.add_argument('--log-max',
'--log_max',
dest='log_max',
type=int,
default=_DEFAULT_LOG_MAX_BYTES,
help='Log maximum bytes')
parser.add_argument('--log-count',
'--log_count',
dest='log_count',
type=int,
default=_DEFAULT_LOG_BACKUP_COUNT,
help='Log backup count')
parser.add_argument('-q',
'--queue',
dest='request_queue_size',
type=int,
default=_DEFAULT_REQUEST_QUEUE_SIZE,
help='request queue size')
return parser
def _parse_args_and_config(args):
parser = _build_option_parser()
# First, parse options without configuration file.
temporary_options, temporary_args = parser.parse_known_args(args=args)
if temporary_args:
logging.critical('Unrecognized positional arguments: %r',
temporary_args)
sys.exit(1)
if temporary_options.config_file:
try:
config_fp = open(temporary_options.config_file, 'r')
except IOError as e:
logging.critical('Failed to open configuration file %r: %r',
temporary_options.config_file, e)
sys.exit(1)
config_parser = configparser.SafeConfigParser()
config_parser.readfp(config_fp)
config_fp.close()
args_from_config = []
for name, value in config_parser.items('pywebsocket'):
args_from_config.append('--' + name)
args_from_config.append(value)
if args is None:
args = args_from_config
else:
args = args_from_config + args
return parser.parse_known_args(args=args)
else:
return temporary_options, temporary_args
def _main(args=None):
"""You can call this function from your own program, but please note that
this function has some side-effects that might affect your program. For
example, it changes the current directory.
"""
options, args = _parse_args_and_config(args=args)
os.chdir(options.document_root)
server_util.configure_logging(options)
# TODO(tyoshino): Clean up initialization of CGI related values. Move some
# of code here to WebSocketRequestHandler class if it's better.
options.cgi_directories = []
options.is_executable_method = None
if options.cgi_paths:
options.cgi_directories = options.cgi_paths.split(',')
if sys.platform in ('cygwin', 'win32'):
cygwin_path = None
# For Win32 Python, it is expected that CYGWIN_PATH
# is set to a directory of cygwin binaries.
# For example, websocket_server.py in Chromium sets CYGWIN_PATH to
# full path of third_party/cygwin/bin.
if 'CYGWIN_PATH' in os.environ:
cygwin_path = os.environ['CYGWIN_PATH']
def __check_script(scriptpath):
return util.get_script_interp(scriptpath, cygwin_path)
options.is_executable_method = __check_script
if options.use_tls:
logging.debug('Using ssl module')
if not options.private_key or not options.certificate:
logging.critical(
'To use TLS, specify private_key and certificate.')
sys.exit(1)
if (options.tls_client_cert_optional and not options.tls_client_auth):
logging.critical('Client authentication must be enabled to '
'specify tls_client_cert_optional')
sys.exit(1)
else:
if options.tls_client_auth:
logging.critical('TLS must be enabled for client authentication.')
sys.exit(1)
if options.tls_client_cert_optional:
logging.critical('TLS must be enabled for client authentication.')
sys.exit(1)
if not options.scan_dir:
options.scan_dir = options.websock_handlers
if options.use_basic_auth:
options.basic_auth_credential = 'Basic ' + base64.b64encode(
options.basic_auth_credential.encode('UTF-8')).decode()
try:
if options.thread_monitor_interval_in_sec > 0:
# Run a thread monitor to show the status of server threads for
# debugging.
server_util.ThreadMonitor(
options.thread_monitor_interval_in_sec).start()
server = WebSocketServer(options)
server.serve_forever()
except Exception as e:
logging.critical('mod_pywebsocket: %s' % e)
logging.critical('mod_pywebsocket: %s' % traceback.format_exc())
sys.exit(1)
if __name__ == '__main__':
_main(sys.argv[1:])
# vi:sts=4 sw=4 et
|
tests/openbb_terminal/stocks/screener/test_finviz_view.py | tehcoderer/GamestonkTerminal | 255 | 11130506 | # IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.screener import finviz_view
from openbb_terminal import helper_funcs
@pytest.mark.vcr
@pytest.mark.parametrize(
"toggle",
[
True,
False,
],
)
@pytest.mark.record_stdout
def test_screener(mocker, toggle):
# MOCK CHARTS
mocker.patch.object(
target=helper_funcs.obbff,
attribute="USE_TABULATE_DF",
new=toggle,
)
# MOCK EXPORT_DATA
mocker.patch(
target="openbb_terminal.stocks.screener.finviz_view.export_data",
)
# MOCK PROGRESS_BAR
mocker.patch(
target="finvizfinance.screener.overview.progress_bar",
)
finviz_view.screener(
loaded_preset="top_gainers",
data_type="overview",
limit=2,
ascend=True,
sort="Ticker",
export="",
)
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"data",
[
None,
pd.DataFrame(),
],
)
def test_screener_no_data(data, mocker):
# MOCK GET_SCREENER_DATA
mocker.patch(
target="openbb_terminal.stocks.screener.finviz_view.get_screener_data",
return_value=data,
)
result = finviz_view.screener(
loaded_preset="top_gainers",
data_type="overview",
limit=2,
ascend=True,
sort="",
export="",
)
assert result == [] # pylint: disable=use-implicit-booleaness-not-comparison
@pytest.mark.vcr
@pytest.mark.parametrize(
"sort",
[
"Ticker",
"MOCK_SORT",
],
)
@pytest.mark.record_stdout
def test_screener_sort_matches(sort, mocker):
# MOCK CHARTS
mocker.patch.object(
target=helper_funcs.obbff,
attribute="USE_TABULATE_DF",
new=True,
)
# MOCK EXPORT_DATA
mocker.patch(
target="openbb_terminal.stocks.screener.finviz_view.export_data",
)
# MOCK PROGRESS_BAR
mocker.patch(
target="finvizfinance.screener.overview.progress_bar",
)
finviz_view.screener(
loaded_preset="top_gainers",
data_type="overview",
limit=2,
ascend=True,
sort=sort,
export="",
)
|
tools/sumo_multi_clients.py | isgeles/SMARTS | 554 | 11130547 |
import os
import random
import subprocess
import threading
import time
from smarts.core.utils.sumo import sumolib, traci, SUMO_PATH
PORT = 8001
"""
Conclusions:
1. connected clients < num-clients: SUMO will block and only start once all clients have connected.
2. connected clients > num-clients: extra connections will be closed by SUMO.
3. The simulation does not advance to the next step until all clients have called the 'simulationStep' command.
4. For multi-client scenarios, currently only TargetTime 0 is supported, which means 'simulationStep' performs exactly one time step.
"""
def start_sumo_server():
sumo_binary = "sumo"
sumo_cmd = [
os.path.join(SUMO_PATH, "bin", sumo_binary),
"--net-file=scenarios/loop/map.net.xml",
"--num-clients=3",
"--remote-port=%s" % PORT,
]
sumo_proc = subprocess.Popen(
sumo_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
time.sleep(0.1)
traci_conn = traci.connect(
PORT, numRetries=100, proc=sumo_proc, waitBetweenRetries=0.01
)
return traci_conn
def connect(port, order=None):
traci_conn = traci.connect(port, numRetries=100, proc=None, waitBetweenRetries=0.1)
if order is not None:
traci_conn.setOrder(order)
return traci_conn
def test_client_connection(client, client_name):
for i in range(10):
print(f"{client_name} steping simulation")
client.simulationStep()
client.close()
def init_client():
client = start_sumo_server()
client.setOrder(1)
test_client_connection(client, "client 1")
def run_client_2():
client2 = connect(PORT, 2)
test_client_connection(client2, "client 2")
def run_client_3():
client3 = connect(PORT, 3)
test_client_connection(client3, "client 3")
def main():
t1 = threading.Thread(target=init_client, args=())
t1.start()
t2 = threading.Thread(target=run_client_2, args=())
t2.start()
t3 = threading.Thread(target=run_client_3, args=())
t3.start()
t1.join()
t2.join()
t3.join()
if __name__ == "__main__":
main()
|
util/pyclient/bft_config.py | definitelyNotFBI/utt | 340 | 11130566 |
# Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
# This code requires python 3.5 or later
from collections import namedtuple
Config = namedtuple('Config', ['id', 'f', 'c', 'max_msg_size', 'req_timeout_milli',
'retry_timeout_milli', "certs_path", "txn_signing_keys_path", "principals_to_participant_map"])
Replica = namedtuple('Replica', ['id', 'ip', 'port', 'metrics_port'])
START_BFT_MSG_PORT = 3710
START_METRICS_PORT = 4710
def bft_msg_port_from_node_id(id):
return START_BFT_MSG_PORT + 2*id
def metrics_port_from_node_id(id):
return START_METRICS_PORT + 2*id
COMM_TYPE_TCP_TLS = "tcp_tls"
COMM_TYPE_UDP = "udp" |
pkcs11/types.py | superG1zm0/python-pkcs11 | 114 | 11130575 | """
Types for high level PKCS#11 wrapper.
This module provides stubs that are overridden in pkcs11._pkcs11.
"""
from threading import RLock
from binascii import hexlify
from cached_property import cached_property
from .constants import (
Attribute,
MechanismFlag,
ObjectClass,
SlotFlag,
TokenFlag,
UserType,
)
from .mechanisms import KeyType, Mechanism
from .exceptions import (
ArgumentsBad,
AttributeTypeInvalid,
NoSuchKey,
MultipleObjectsReturned,
SignatureInvalid,
SignatureLenRange,
)
PROTECTED_AUTH = object()
"""Indicate the pin should be supplied via an external mechanism (e.g. pin pad)"""
def _CK_UTF8CHAR_to_str(data):
"""Convert CK_UTF8CHAR to string."""
return data.rstrip(b'\0').decode('utf-8').rstrip()
def _CK_VERSION_to_tuple(data):
"""Convert CK_VERSION to tuple."""
return (data['major'], data['minor'])
def _CK_MECHANISM_TYPE_to_enum(mechanism):
"""Convert CK_MECHANISM_TYPE to enum or be okay."""
try:
return Mechanism(mechanism)
except ValueError:
return mechanism
class MechanismInfo:
"""
Information about a mechanism.
See :meth:`pkcs11.Slot.get_mechanism_info`.
"""
def __init__(self,
slot,
mechanism,
ulMinKeySize=None,
ulMaxKeySize=None,
flags=None,
**kwargs):
self.slot = slot
""":class:`pkcs11.Slot` this information is for."""
self.mechanism = mechanism
""":class:`pkcs11.mechanisms.Mechanism` this information is for."""
self.min_key_length = ulMinKeySize
"""Minimum key length in bits (:class:`int`)."""
self.max_key_length = ulMaxKeySize
"""Maximum key length in bits (:class:`int`)."""
self.flags = MechanismFlag(flags)
"""Mechanism capabilities (:class:`pkcs11.constants.MechanismFlag`)."""
def __str__(self):
return '\n'.join((
"Supported key lengths: [%s, %s]" % (self.min_key_length,
self.max_key_length),
"Flags: %s" % self.flags,
))
def __repr__(self):
return '<{klass} (mechanism={mechanism}, flags={flags})>'.format(
klass=type(self).__name__,
mechanism=str(self.mechanism),
flags=str(self.flags))
class Slot:
"""
A PKCS#11 device slot.
This object represents a physical or software slot exposed by PKCS#11.
A slot has hardware capabilities, e.g. supported mechanisms and may has
a physical or software :class:`Token` installed.
"""
def __init__(self, lib, slot_id,
slotDescription=None,
manufacturerID=None,
hardwareVersion=None,
firmwareVersion=None,
flags=None,
**kwargs):
self._lib = lib # Hold a reference to the lib to prevent gc
self.slot_id = slot_id
"""Slot identifier (opaque)."""
self.slot_description = _CK_UTF8CHAR_to_str(slotDescription)
"""Slot name (:class:`str`)."""
self.manufacturer_id = _CK_UTF8CHAR_to_str(manufacturerID)
"""Slot/device manufacturer's name (:class:`str`)."""
self.hardware_version = _CK_VERSION_to_tuple(hardwareVersion)
"""Hardware version (:class:`tuple`)."""
self.firmware_version = _CK_VERSION_to_tuple(firmwareVersion)
"""Firmware version (:class:`tuple`)."""
self.flags = SlotFlag(flags)
"""Capabilities of this slot (:class:`SlotFlag`)."""
def get_token(self):
"""
Returns the token loaded into this slot.
:rtype: Token
"""
raise NotImplementedError()
def get_mechanisms(self):
"""
Returns the mechanisms supported by this device.
:rtype: set(Mechanism)
"""
raise NotImplementedError()
def get_mechanism_info(self, mechanism):
"""
Returns information about the mechanism.
:param Mechanism mechanism: mechanism to learn about
:rtype: MechanismInfo
"""
raise NotImplementedError()
def __eq__(self, other):
return self.slot_id == other.slot_id
def __str__(self):
return '\n'.join((
"Slot Description: %s" % self.slot_description,
"Manufacturer ID: %s" % self.manufacturer_id,
"Hardware Version: %s.%s" % self.hardware_version,
"Firmware Version: %s.%s" % self.firmware_version,
"Flags: %s" % self.flags,
))
def __repr__(self):
return '<{klass} (slotID={slot_id} flags={flags})>'.format(
klass=type(self).__name__,
slot_id=self.slot_id,
flags=str(self.flags))
class Token:
"""
A PKCS#11 token.
A token can be physically installed in a :class:`Slot`, or a software
token, depending on your PKCS#11 library.
"""
def __init__(self, slot,
label=None,
serialNumber=None,
model=None,
manufacturerID=None,
hardwareVersion=None,
firmwareVersion=None,
flags=None,
**kwargs):
self.slot = slot
"""The :class:`Slot` this token is installed in."""
self.label = _CK_UTF8CHAR_to_str(label)
"""Label of this token (:class:`str`)."""
self.serial = serialNumber.rstrip()
"""Serial number of this token (:class:`bytes`)."""
self.manufacturer_id = _CK_UTF8CHAR_to_str(manufacturerID)
"""Manufacturer ID."""
self.model = _CK_UTF8CHAR_to_str(model)
"""Model name."""
self.hardware_version = _CK_VERSION_to_tuple(hardwareVersion)
"""Hardware version (:class:`tuple`)."""
self.firmware_version = _CK_VERSION_to_tuple(firmwareVersion)
"""Firmware version (:class:`tuple`)."""
self.flags = TokenFlag(flags)
"""Capabilities of this token (:class:`pkcs11.flags.TokenFlag`)."""
def __eq__(self, other):
return self.slot == other.slot
def open(self, rw=False, user_pin=None, so_pin=None):
"""
Open a session on the token and optionally log in as a user or
security officer (pass one of `user_pin` or `so_pin`). Pass PROTECTED_AUTH to
indicate the pin should be supplied via an external mechanism (e.g. pin pad).
Can be used as a context manager or close with :meth:`Session.close`.
::
with token.open() as session:
print(session)
:param rw: True to create a read/write session.
:param bytes user_pin: Authenticate to this session as a user.
:param bytes so_pin: Authenticate to this session as a
security officer.
:rtype: Session
"""
raise NotImplementedError()
def __str__(self):
return self.label
def __repr__(self):
return "<{klass} (label='{label}' serial={serial} flags={flags})>"\
.format(klass=type(self).__name__,
label=self.label,
serial=self.serial,
flags=str(self.flags))
class Session:
"""
A PKCS#11 :class:`Token` session.
A session is required to do nearly all operations on a token including
encryption/signing/keygen etc.
Create a session using :meth:`Token.open`. Sessions can be used as a
context manager or closed with :meth:`close`.
"""
def __init__(self, token, handle, rw=False, user_type=UserType.NOBODY):
self.token = token
""":class:`Token` this session is on."""
self._handle = handle
# Big operation lock prevents other threads from entering/reentering
# operations. If the same thread enters the lock, they will get a
# Cryptoki warning
self._operation_lock = RLock()
self.rw = rw
"""True if this is a read/write session."""
self.user_type = user_type
"""User type for this session (:class:`pkcs11.constants.UserType`)."""
def __eq__(self, other):
return self.token == other.token and \
self._handle == other._handle
def __hash__(self):
return hash(self._handle)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def close(self):
"""Close the session."""
raise NotImplementedError()
def get_key(self, object_class=None, key_type=None, label=None, id=None):
"""
Search for a key with any of `key_type`, `label` and/or `id`.
Returns a single key or throws :class:`pkcs11.exceptions.NoSuchKey` or
:class:`pkcs11.exceptions.MultipleObjectsReturned`.
This is a simplified version of :meth:`get_objects`, which allows
searching for any object.
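For example, a stored AES key might be fetched by its label like this (the
label value is purely illustrative)::
key = session.get_key(key_type=KeyType.AES, label='MY KEY')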
:param ObjectClass object_class: Optional object class.
:param KeyType key_type: Optional key type.
:param str label: Optional key label.
:param bytes id: Optional key id.
:rtype: Key
"""
if object_class is None and \
key_type is None and \
label is None \
and id is None:
raise ArgumentsBad("Must specify at least one search parameter.")
attrs = {}
if object_class is not None:
attrs[Attribute.CLASS] = object_class
if key_type is not None:
attrs[Attribute.KEY_TYPE] = key_type
if label is not None:
attrs[Attribute.LABEL] = label
if id is not None:
attrs[Attribute.ID] = id
iterator = self.get_objects(attrs)
try:
try:
key = next(iterator)
except StopIteration:
raise NoSuchKey("No key matching %s" % attrs)
try:
next(iterator)
raise MultipleObjectsReturned("More than 1 key matches %s" %
attrs)
except StopIteration:
return key
finally:
# Force finalizing SearchIter rather than waiting for garbage
# collection, so that we release the operation lock.
iterator._finalize()
def get_objects(self, attrs=None):
"""
Search for objects matching `attrs`. Returns a generator.
::
for obj in session.get_objects({
Attribute.CLASS: ObjectClass.SECRET_KEY,
Attribute.LABEL: 'MY LABEL',
}):
print(obj)
This is the more generic version of :meth:`get_key`.
:param dict(Attribute,*) attrs: Attributes to search for.
:rtype: iter(Object)
"""
raise NotImplementedError()
def create_object(self, attrs):
"""
Create a new object on the :class:`Token`. This is a low-level
interface to create any type of object and can be used for importing
data onto the Token.
::
key = session.create_object({
pkcs11.Attribute.CLASS: pkcs11.ObjectClass.SECRET_KEY,
pkcs11.Attribute.KEY_TYPE: pkcs11.KeyType.AES,
pkcs11.Attribute.VALUE: b'SUPER SECRET KEY',
})
For generating keys see :meth:`generate_key` or
:meth:`generate_keypair`.
For importing keys see :ref:`importing-keys`.
Requires a read/write session, unless the object is not to be
stored. To permanently store the object in the HSM, add **pkcs11.Attribute.TOKEN: True**;
see :class:`pkcs11.Attribute` for more available object attributes.
:param dict(Attribute,*) attrs: attributes of the object to create
:rtype: Object
"""
raise NotImplementedError()
def create_domain_parameters(self, key_type, attrs,
local=False, store=False):
"""
Create a domain parameters object from known parameters.
Domain parameters are used for key generation of key types such
as DH, DSA and EC.
You can also generate new parameters using
:meth:`generate_domain_parameters`.
The `local` parameter creates a Python object that is not created on
the HSM (its object handle will be unset). This is useful if you only
need the domain parameters to create another object, and do not need a
real PKCS #11 object in the session.
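For example, Diffie-Hellman parameters might be loaded from known values
(``prime`` and ``base`` are assumed to be parameter byte strings you already
hold)::
parameters = session.create_domain_parameters(KeyType.DH, {Attribute.PRIME: prime, Attribute.BASE: base}, local=True)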
.. warning::
Domain parameters have no id or labels. Storing them is possible
but be aware they may be difficult to retrieve.
:param KeyType key_type: Key type these parameters are for
:param dict(Attribute,*) attrs: Domain parameters
(specific to `key_type`)
:param local: if True, do not transfer parameters to the HSM.
:param store: if True, store these parameters permanently in the HSM.
:rtype: DomainParameters
"""
raise NotImplementedError()
def generate_domain_parameters(self, key_type, param_length, store=False,
mechanism=None, mechanism_param=None,
template=None):
"""
Generate domain parameters.
See :meth:`create_domain_parameters` for creating domain parameter
objects from known parameters.
See :meth:`generate_key` for documentation on mechanisms and templates.
.. warning::
Domain parameters have no id or labels. Storing them is possible
but be aware they may be difficult to retrieve.
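For example, fresh Diffie-Hellman parameters might be generated like this
(the key type and size are illustrative)::
parameters = session.generate_domain_parameters(KeyType.DH, 1024)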
:param KeyType key_type: Key type these parameters are for
:param int param_length: Size of the parameters (e.g. prime length)
in bits.
:param store: Store these parameters in the HSM
:param Mechanism mechanism: Optional generation mechanism (or default)
:param bytes mechanism_param: Optional mechanism parameter.
:param dict(Attribute,*) template: Optional additional attributes.
:rtype: DomainParameters
"""
raise NotImplementedError()
def generate_key(self, key_type, key_length=None,
id=None, label=None,
store=False, capabilities=None,
mechanism=None, mechanism_param=None,
template=None):
"""
Generate a single key (e.g. AES, DES).
Keys should set at least `id` or `label`.
An appropriate `mechanism` will be chosen for `key_type`
(see :attr:`DEFAULT_GENERATE_MECHANISMS`) or this can be overridden.
Similarly for the `capabilities` (see
:attr:`DEFAULT_KEY_CAPABILITIES`).
The `template` will extend the default template used to make the
key.
Possible mechanisms and template attributes are defined by `PKCS #11
<http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/pkcs11-curr-v2.40.html>`_.
Invalid mechanisms or attributes should raise
:exc:`pkcs11.exceptions.MechanismInvalid` and
:exc:`pkcs11.exceptions.AttributeTypeInvalid` respectively.
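For example, a 256-bit AES key stored on the token might be generated like
this (the label is illustrative)::
key = session.generate_key(KeyType.AES, 256, label='MY KEY', store=True)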
:param KeyType key_type: Key type (e.g. KeyType.AES)
:param int key_length: Key length in bits (e.g. 256).
:param bytes id: Key identifier.
:param str label: Key label.
:param store: Store key on token (requires R/W session).
:param MechanismFlag capabilities: Key capabilities (or default).
:param Mechanism mechanism: Generation mechanism (or default).
:param bytes mechanism_param: Optional vector to the mechanism.
:param dict(Attribute,*) template: Additional attributes.
:rtype: SecretKey
"""
raise NotImplementedError()
def generate_keypair(self, key_type, key_length=None, **kwargs):
"""
Generate an asymmetric keypair (e.g. RSA).
See :meth:`generate_key` for more information.
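For example, a 2048-bit RSA key pair might be generated like this (the label
is illustrative)::
public, private = session.generate_keypair(KeyType.RSA, 2048, label='MY KEY')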
:param KeyType key_type: Key type (e.g. KeyType.DSA)
:param int key_length: Key length in bits (e.g. 256).
:param bytes id: Key identifier.
:param str label: Key label.
:param bool store: Store key on token (requires R/W session).
:param MechanismFlag capabilities: Key capabilities (or default).
:param Mechanism mechanism: Generation mechanism (or default).
:param bytes mechanism_param: Optional vector to the mechanism.
:param dict(Attribute,*) private_template: Additional attributes for private key.
:param dict(Attribute,*) public_template: Additional attributes for public key.
:rtype: (PublicKey, PrivateKey)
"""
if key_type is KeyType.DSA:
if key_length is None:
raise ArgumentsBad("Must specify `key_length`")
params = self.generate_domain_parameters(key_type, key_length)
return params.generate_keypair(**kwargs)
else:
return self._generate_keypair(key_type, key_length=key_length,
**kwargs)
def seed_random(self, seed):
"""
Mix additional seed material into the RNG (if supported).
:param bytes seed: Bytes of random to seed.
"""
raise NotImplementedError()
def generate_random(self, nbits):
"""
Generate `nbits` bits of random or pseudo-random data (if supported).
:param int nbits: Number of bits to generate.
:rtype: bytes
"""
raise NotImplementedError()
def digest(self, data, **kwargs):
"""
Digest `data` using `mechanism`.
`data` can be a single value or an iterator.
:class:`Key` objects can also be digested, optionally interspersed
with :class:`bytes`.
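For example, assuming the token supports SHA-256::
digest = session.digest('HELLO WORLD', mechanism=Mechanism.SHA256)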
:param data: Data to digest
:type data: str, bytes, Key or iter(bytes, Key)
:param Mechanism mechanism: digest mechanism
:param bytes mechanism_param: optional mechanism parameter
:rtype: bytes
"""
# If data is a string, encode it now as UTF-8.
if isinstance(data, str):
data = data.encode('utf-8')
if isinstance(data, bytes):
return self._digest(data, **kwargs)
elif isinstance(data, Key):
data = (data,)
return self._digest_generator(data, **kwargs)
class Object:
"""
A PKCS#11 object residing on a :class:`Token`.
Objects implement :meth:`__getitem__` and :meth:`__setitem__` to
retrieve :class:`pkcs11.constants.Attribute` values on the object.
Valid attributes for an object are given in `PKCS #11
<http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/pkcs11-curr-v2.40.html>`_.
Invalid attributes should raise
:exc:`pkcs11.exceptions.AttributeTypeInvalid`.
"""
object_class = None
""":class:`pkcs11.constants.ObjectClass` of this Object."""
def __init__(self, session, handle):
self.session = session
""":class:`Session` this object is valid for."""
self._handle = handle
def __eq__(self, other):
return self.session == other.session and \
self._handle == other._handle
def __hash__(self):
return hash((self.session, self._handle))
def copy(self, attrs):
"""
Make a copy of the object with new attributes `attrs`.
Requires a read/write session, unless the object is not to be
stored.
::
new = key.copy({
Attribute.LABEL: 'MY NEW KEY',
})
Certain objects may not be copied. Calling :meth:`copy` on such
objects will result in an exception.
:param dict(Attribute,*) attrs: attributes for the new :class:`Object`
:rtype: Object
"""
raise NotImplementedError()
def destroy(self):
"""
Destroy the object.
Requires a read/write session, unless the object is not stored.
Certain objects may not be destroyed. Calling :meth:`destroy` on such
objects will result in an exception.
The :class:`Object` is no longer valid.
"""
raise NotImplementedError()
class DomainParameters(Object):
"""
PKCS#11 Domain Parameters.
Used to store domain parameters as part of the key generation step, e.g.
in DSA and Diffie-Hellman.
"""
def __init__(self, session, handle, params=None):
super().__init__(session, handle)
self.params = params
def __getitem__(self, key):
if self._handle is None:
try:
return self.params[key]
except KeyError:
raise AttributeTypeInvalid
else:
return super().__getitem__(key)
def __setitem__(self, key, value):
if self._handle is None:
self.params[key] = value
else:
super().__setitem__(key, value)
@cached_property
def key_type(self):
"""
Key type (:class:`pkcs11.mechanisms.KeyType`) these parameters
can be used to generate.
"""
return self[Attribute.KEY_TYPE]
def generate_keypair(self, id=None, label=None,
store=False, capabilities=None,
mechanism=None, mechanism_param=None,
public_template=None, private_template=None):
"""
Generate a key pair from these domain parameters (e.g. for
        Diffie-Hellman).
See :meth:`Session.generate_key` for more information.
:param bytes id: Key identifier.
:param str label: Key label.
:param store: Store key on token (requires R/W session).
:param MechanismFlag capabilities: Key capabilities (or default).
:param Mechanism mechanism: Generation mechanism (or default).
:param bytes mechanism_param: Optional vector to the mechanism.
        :param dict(Attribute,*) private_template: Additional attributes for private key.
        :param dict(Attribute,*) public_template: Additional attributes for public key.
:rtype: (PublicKey, PrivateKey)
"""
raise NotImplementedError()
class Key(Object):
"""Base class for all key :class:`Object` types."""
@cached_property
def id(self):
"""Key id (:class:`bytes`)."""
return self[Attribute.ID]
@cached_property
def label(self):
"""Key label (:class:`str`)."""
return self[Attribute.LABEL]
@cached_property
def key_type(self):
"""Key type (:class:`pkcs11.mechanisms.KeyType`)."""
return self[Attribute.KEY_TYPE]
@cached_property
def _key_description(self):
"""A description of the key."""
try:
return '%s-bit %s' % (self.key_length, self.key_type.name)
except AttributeTypeInvalid:
return self.key_type.name
def __repr__(self):
return "<%s label='%s' id='%s' %s>" % (
type(self).__name__,
self.label,
hexlify(self.id).decode('ascii'),
self._key_description)
class SecretKey(Key):
"""
A PKCS#11 :attr:`pkcs11.constants.ObjectClass.SECRET_KEY` object
(symmetric encryption key).
"""
object_class = ObjectClass.SECRET_KEY
@cached_property
def key_length(self):
"""Key length in bits."""
return self[Attribute.VALUE_LEN] * 8
class PublicKey(Key):
"""
A PKCS#11 :attr:`pkcs11.constants.ObjectClass.PUBLIC_KEY` object
(asymmetric public key).
    RSA public keys can be imported and exported from PKCS#1 DER-encoding
using :func:`pkcs11.util.rsa.decode_rsa_public_key` and
:func:`pkcs11.util.rsa.encode_rsa_public_key` respectively.
"""
object_class = ObjectClass.PUBLIC_KEY
@cached_property
def key_length(self):
"""Key length in bits."""
return self[Attribute.MODULUS_BITS]
class PrivateKey(Key):
"""
A PKCS#11 :attr:`pkcs11.constants.ObjectClass.PRIVATE_KEY` object
(asymmetric private key).
RSA private keys can be imported from PKCS#1 DER-encoding using
:func:`pkcs11.util.rsa.decode_rsa_private_key`.
.. warning::
Private keys imported directly, rather than unwrapped from a trusted
        private key, should be considered insecure.
"""
object_class = ObjectClass.PRIVATE_KEY
@cached_property
def key_length(self):
"""Key length in bits."""
return len(self[Attribute.MODULUS]) * 8
class Certificate(Object):
"""
A PKCS#11 :attr:`pkcs11.constants.ObjectClass.CERTIFICATE` object.
PKCS#11 is limited in its handling of certificates, and does not
provide features like parsing of X.509 etc. These should be handled in
an external library. PKCS#11 will not set attributes on the certificate
based on the `VALUE`.
:func:`pkcs11.util.x509.decode_x509_certificate` will extract attributes
from a certificate to create the object.
"""
object_class = ObjectClass.CERTIFICATE
@cached_property
def certificate_type(self):
"""
The type of certificate.
:rtype: CertificateType
"""
return self[Attribute.CERTIFICATE_TYPE]
class EncryptMixin(Object):
"""
This :class:`Object` supports the encrypt capability.
"""
def encrypt(self, data, buffer_size=8192, **kwargs):
"""
Encrypt some `data`.
Data can be either :class:`str` or :class:`bytes`, in which case it
will return :class:`bytes`; or an iterable of :class:`bytes` in
which case it will return a generator yielding :class:`bytes`
(be aware, more chunks will be output than input).
If you do not specify `mechanism` then the default from
:attr:`DEFAULT_ENCRYPT_MECHANISMS` will be used. If an iterable
is passed and the mechanism chosen does not support handling data
in chunks, an exception will be raised.
Some mechanisms (including the default CBC mechanisms) require
additional parameters, e.g. an initialisation vector [#]_, to
the mechanism. Pass this as `mechanism_param`.
        Documentation of these parameters is specified in `PKCS #11
<http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/pkcs11-curr-v2.40.html>`_.
        When passing an iterable for `data`, `buffer_size` must be sufficient
        to store the working buffer. An integral number of blocks, greater
        than or equal to the largest input chunk, is recommended.
The returned generator obtains a lock on the :class:`Session`
to prevent other threads from starting a simultaneous operation.
The lock is released when you consume/destroy the generator.
See :ref:`concurrency`.
.. warning::
It's not currently possible to cancel an encryption operation
by deleting the generator. You must consume the generator to
complete the operation.
An example of streaming a file is as follows:
::
def encrypt_file(file_in, file_out, buffer_size=8192):
with \\
open(file_in, 'rb') as input_, \\
open(file_out, 'wb') as output:
                    chunks = iter(lambda: input_.read(buffer_size), b'')
for chunk in key.encrypt(chunks,
mechanism_param=iv,
buffer_size=buffer_size):
output.write(chunk)
:param data: data to encrypt
:type data: str, bytes or iter(bytes)
:param Mechanism mechanism: optional encryption mechanism
(or None for default)
:param bytes mechanism_param: optional mechanism parameter
(e.g. initialisation vector).
:param int buffer_size: size of the working buffer (for generators)
:rtype: bytes or iter(bytes)
        .. [#] The initialisation vector should contain quality random data,
e.g. from :meth:`Session.generate_random`.
This method will not return the value of the initialisation
vector as part of the encryption. You must store that yourself.
"""
# If data is a string, encode it now as UTF-8.
if isinstance(data, str):
data = data.encode('utf-8')
if isinstance(data, bytes):
return self._encrypt(data, **kwargs)
else:
return self._encrypt_generator(data,
buffer_size=buffer_size, **kwargs)
class DecryptMixin(Object):
"""
This :class:`Object` supports the decrypt capability.
"""
def decrypt(self, data, buffer_size=8192, **kwargs):
"""
Decrypt some `data`.
See :meth:`EncryptMixin.encrypt` for more information.
:param data: data to decrypt
:type data: bytes or iter(bytes)
:param Mechanism mechanism: optional encryption mechanism
(or None for default).
:param bytes mechanism_param: optional mechanism parameter
(e.g. initialisation vector).
:param int buffer_size: size of the working buffer (for generators).
:rtype: bytes or iter(bytes)
"""
# If we're not an iterable, call into our generator with an iterable
# version and join the result at the end.
if isinstance(data, bytes):
return self._decrypt(data, **kwargs)
else:
return self._decrypt_generator(data,
buffer_size=buffer_size, **kwargs)
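# Editor's note: an illustrative AES round trip over EncryptMixin/DecryptMixin
# (not part of the library source). `session` and a 256-bit AES `key` are assumed
# to exist already; as the docstring above notes, the default CBC mechanisms need
# an IV passed as `mechanism_param`.
def _example_encrypt_roundtrip(session, key):
    iv = session.generate_random(128)                      # 16-byte IV
    ciphertext = key.encrypt('attack at dawn', mechanism_param=iv)
    plaintext = key.decrypt(ciphertext, mechanism_param=iv)
    return plaintext                                       # b'attack at dawn'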
class SignMixin(Object):
"""
This :class:`Object` supports the sign capability.
"""
def sign(self, data, **kwargs):
"""
Sign some `data`.
See :meth:`EncryptMixin.encrypt` for more information.
For DSA and ECDSA keys, PKCS #11 outputs the two parameters (r & s)
as two concatenated `biginteger` of the same length. To convert these
into other formats, such as the format used by OpenSSL, use
:func:`pkcs11.util.dsa.encode_dsa_signature` or
:func:`pkcs11.util.ec.encode_ecdsa_signature`.
:param data: data to sign
:type data: str, bytes or iter(bytes)
:param Mechanism mechanism: optional signing mechanism
:param bytes mechanism_param: optional mechanism parameter
:rtype: bytes
"""
# If data is a string, encode it now as UTF-8.
if isinstance(data, str):
data = data.encode('utf-8')
if isinstance(data, bytes):
return self._sign(data, **kwargs)
else:
return self._sign_generator(data, **kwargs)
class VerifyMixin(Object):
"""
This :class:`Object` supports the verify capability.
"""
def verify(self, data, signature, **kwargs):
"""
Verify some `data`.
See :meth:`EncryptMixin.encrypt` for more information.
Returns True if `signature` is valid for `data`.
For DSA and ECDSA keys, PKCS #11 expects the two parameters (r & s)
as two concatenated `biginteger` of the same length. To convert these
from other formats, such as the format used by OpenSSL, use
:func:`pkcs11.util.dsa.decode_dsa_signature` or
:func:`pkcs11.util.ec.decode_ecdsa_signature`.
        :param data: data to verify
:type data: str, bytes or iter(bytes)
:param bytes signature: signature
:param Mechanism mechanism: optional signing mechanism
:param bytes mechanism_param: optional mechanism parameter
:rtype: bool
"""
# If data is a string, encode it now as UTF-8.
if isinstance(data, str):
data = data.encode('utf-8')
try:
if isinstance(data, bytes):
self._verify(data, signature, **kwargs)
else:
self._verify_generator(data, signature, **kwargs)
return True
except (SignatureInvalid, SignatureLenRange):
return False
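# Editor's note: an illustrative sign/verify sketch (not part of the library
# source). `public` and `private` are assumed to be an asymmetric key pair from
# Session.generate_keypair; the default signing mechanism for the key type is used.
def _example_sign_verify(public, private):
    signature = private.sign('important message')
    assert public.verify('important message', signature)
    assert not public.verify('tampered message', signature)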
class WrapMixin(Object):
"""
This :class:`Object` supports the wrap capability.
"""
def wrap_key(self, key,
mechanism=None, mechanism_param=None):
"""
Use this key to wrap (i.e. encrypt) `key` for export. Returns
an encrypted version of `key`.
`key` must have :attr:`Attribute.EXTRACTABLE` = True.
:param Key key: key to export
:param Mechanism mechanism: wrapping mechanism (or None for default).
:param bytes mechanism_param: mechanism parameter (if required)
:rtype: bytes
"""
raise NotImplementedError()
class UnwrapMixin(Object):
"""
This :class:`Object` supports the unwrap capability.
"""
def unwrap_key(self, object_class, key_type, key_data,
id=None, label=None,
mechanism=None, mechanism_param=None,
store=False, capabilities=None,
template=None):
"""
Use this key to unwrap (i.e. decrypt) and import `key_data`.
See :class:`Session.generate_key` for more information.
:param ObjectClass object_class: Object class to import as
:param KeyType key_type: Key type (e.g. KeyType.AES)
:param bytes key_data: Encrypted key to unwrap
:param bytes id: Key identifier.
:param str label: Key label.
:param store: Store key on token (requires R/W session).
:param MechanismFlag capabilities: Key capabilities (or default).
:param Mechanism mechanism: Generation mechanism (or default).
:param bytes mechanism_param: Optional vector to the mechanism.
:param dict(Attribute,*) template: Additional attributes.
:rtype: Key
"""
raise NotImplementedError()
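# Editor's note: an illustrative wrap/unwrap sketch (not part of the library
# source). `kek` is a wrapping key and `session_key` an AES key created with
# Attribute.EXTRACTABLE set; both are assumed to exist already. ObjectClass and
# KeyType are this module's existing imports.
def _example_wrap_unwrap(kek, session_key):
    wrapped = kek.wrap_key(session_key)
    return kek.unwrap_key(ObjectClass.SECRET_KEY, KeyType.AES, wrapped)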
class DeriveMixin(Object):
"""
This :class:`Object` supports the derive capability.
"""
def derive_key(self, key_type, key_length,
id=None, label=None,
store=False, capabilities=None,
mechanism=None, mechanism_param=None,
template=None):
"""
Derive a new key from this key. Used to create session
keys from a PKCS key exchange.
Typically the mechanism, e.g. Diffie-Hellman, requires you
to specify the other party's piece of shared information as
the `mechanism_param`. Some mechanisms require a tuple of data (see
:class:`pkcs11.mechanisms.Mechanism`).
See :class:`Session.generate_key` for more documentation on key
generation.
Diffie-Hellman example:
::
# Diffie-Hellman domain parameters
# e.g. from RFC 3526, RFC 5114 or `openssl dhparam`
prime = [0xFF, ...]
base = [0x02]
parameters = session.create_domain_parameters(KeyType.DH, {
Attribute.PRIME: prime,
Attribute.BASE: base,
}, local=True)
# Alice generates a DH key pair from the public
# Diffie-Hellman parameters
public, private = parameters.generate_keypair()
alices_value = public[Attribute.VALUE]
# Bob generates a DH key pair from the same parameters.
# Alice exchanges public values with Bob...
# She sends `alices_value` and receives `bobs_value`.
# (Assuming Alice is doing AES CBC, she also needs to send an IV)
# Alice generates a session key with Bob's public value
# Bob will generate the same session key using Alice's value.
session_key = private.derive_key(
KeyType.AES, 128,
mechanism_param=bobs_value)
Elliptic-Curve Diffie-Hellman example:
::
# DER encoded EC params, e.g. from OpenSSL
# openssl ecparam -outform der -name prime192v1 | base64
#
# Check what EC parameters the module supports with
# slot.get_module_info()
parameters = session.create_domain_parameters(KeyType.EC, {
Attribute.EC_PARAMS: b'...',
}, local=True)
            # Alice generates an EC key pair, and gets her public value
public, private = parameters.generate_keypair()
alices_value = public[Attribute.EC_POINT]
            # Bob generates an EC key pair from the same parameters.
# Alice exchanges public values with Bob...
# She sends `alices_value` and receives `bobs_value`.
# Alice generates a session key with Bob's public value
# Bob will generate the same session key using Alice's value.
session_key = private.derive_key(
KeyType.AES, 128,
mechanism_param=(KDF.NULL, None, bobs_value))
:param KeyType key_type: Key type (e.g. KeyType.AES)
:param int key_length: Key length in bits (e.g. 256).
:param bytes id: Key identifier.
:param str label: Key label.
:param store: Store key on token (requires R/W session).
:param MechanismFlag capabilities: Key capabilities (or default).
:param Mechanism mechanism: Generation mechanism (or default).
:param bytes mechanism_param: Optional vector to the mechanism.
:param dict(Attribute,*) template: Additional attributes.
:rtype: SecretKey
"""
raise NotImplementedError()
|
networkx/utils/tests/test_random_sequence.py | jebogaert/networkx | 10,024 | 11130602 | import pytest
from networkx.utils import (
powerlaw_sequence,
zipf_rv,
random_weighted_sample,
weighted_choice,
)
def test_degree_sequences():
seq = powerlaw_sequence(10, seed=1)
seq = powerlaw_sequence(10)
assert len(seq) == 10
def test_zipf_rv():
r = zipf_rv(2.3, xmin=2, seed=1)
r = zipf_rv(2.3, 2, 1)
r = zipf_rv(2.3)
    assert isinstance(r, int)
pytest.raises(ValueError, zipf_rv, 0.5)
pytest.raises(ValueError, zipf_rv, 2, xmin=0)
def test_random_weighted_sample():
mapping = {"a": 10, "b": 20}
s = random_weighted_sample(mapping, 2, seed=1)
s = random_weighted_sample(mapping, 2)
assert sorted(s) == sorted(mapping.keys())
pytest.raises(ValueError, random_weighted_sample, mapping, 3)
def test_random_weighted_choice():
mapping = {"a": 10, "b": 0}
c = weighted_choice(mapping, seed=1)
c = weighted_choice(mapping)
assert c == "a"
|
fuzzinator/call/stream_monitored_subprocess_call.py | renatahodovan/fuzzinator | 202 | 11130619 | # Copyright (c) 2016-2021 <NAME>, <NAME>.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import errno
import fcntl
import logging
import os
import select
import subprocess
import time
from ..config import as_dict, as_list, as_pargs, as_path, decode
from ..controller import Controller
from .call import Call
from .non_issue import NonIssue
from .regex_automaton import RegexAutomaton
logger = logging.getLogger(__name__)
class StreamMonitoredSubprocessCall(Call):
"""
Subprocess invocation-based call of a SUT that takes test input on its
command line. The main difference from
:func:`fuzzinator.call.SubprocessCall` is that it continuously monitors
the stdout and stderr streams of the SUT and forces it to terminate if
some predefined patterns are appearing.
**Mandatory parameter of the SUT call:**
- ``command``: string to pass to the child shell as a command to run (all
occurrences of ``{test}`` in the string are replaced by the actual test
input).
**Optional parameters of the SUT call:**
- ``cwd``: if not ``None``, change working directory before the command
invocation.
- ``env``: if not ``None``, a dictionary of variable names-values to
update the environment with.
- ``end_patterns``: array of patterns to match against the lines of stdout
and stderr streams. The patterns and instructions are interpreted as
defined in :class:`fuzzinator.call.RegexAutomaton`.
- ``timeout``: run subprocess with timeout.
- ``encoding``: stdout and stderr encoding (default: autodetect).
**Result of the SUT call:**
- If processing stdout and stderr with ``end_patterns`` doesn't produce
any result, no issue is returned.
- Otherwise, an issue with keys from the matching patterns of
``end_pattern`` extended with the ``'exit_code'``, ``'stdout'``,
``'stderr'`` and ``'time'`` properties is returned.
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
call=fuzzinator.call.StreamMonitoredSubprocessCall
[sut.foo.call]
# assuming that {test} is something that can be interpreted by foo as
# command line argument
command=./bin/foo {test}
cwd=/home/alice/foo
env={"BAR": "1"}
end_patterns=["mss /(?P<file>[^:]+):(?P<line>[0-9]+): (?P<func>[^:]+): (?P<msg>Assertion `.*' failed)/"]
timeout=30
.. note::
Not available on platforms without fcntl support (e.g., Windows).
"""
def __init__(self, *, command, cwd=None, env=None, end_patterns=None, timeout=None, encoding=None, **kwargs):
self.command = command
self.cwd = as_path(cwd) if cwd else os.getcwd()
self.end_patterns = [RegexAutomaton.split_pattern(p) for p in as_list(end_patterns)] if end_patterns else []
self.env = dict(os.environ, **as_dict(env)) if env else None
self.timeout = int(timeout) if timeout else None
self.encoding = encoding
def __call__(self, *, test, timeout=None, **kwargs):
timeout = timeout or self.timeout
start_time = time.time()
proc = subprocess.Popen(as_pargs(self.command.format(test=test)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.cwd,
env=self.env)
streams = {'stdout': '', 'stderr': ''}
select_fds = [stream.fileno() for stream in [proc.stderr, proc.stdout]]
for fd in select_fds:
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
issue = dict()
end_loop = False
regex_automaton = RegexAutomaton(self.end_patterns)
while not end_loop:
try:
try:
read_fds = select.select(select_fds, [], select_fds, 0.5)[0]
except select.error as e:
if e.args[0] == errno.EINVAL:
continue
raise
for stream in streams:
if getattr(proc, stream).fileno() in read_fds:
while True:
chunk = getattr(proc, stream).read(512)
if not chunk:
break
streams[stream] += decode(chunk, self.encoding)
# Process the stream content line-by-line.
terminate, new_details = regex_automaton.process(streams[stream].splitlines(), issue)
if new_details or terminate:
end_loop = True
if proc.poll() is not None or (timeout and time.time() - start_time > timeout):
break
except IOError as e:
logger.warning('Exception in stream filtering.', exc_info=e)
end_time = time.time()
Controller.kill_process_tree(proc.pid)
logger.debug('%s\n%s', streams['stdout'], streams['stderr'])
proc_details = {
'exit_code': proc.returncode,
'stderr': streams['stderr'],
'stdout': streams['stdout'],
'time': end_time - start_time,
}
if issue:
issue.update(proc_details)
return issue
return NonIssue(proc_details)
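# Editor's note: a hedged sketch of driving this call directly from Python rather
# than through Fuzzinator's INI configuration. The command and end_pattern below
# are illustrative only; string values are given in the same form the INI config
# would use, since the constructor parses them with as_list/int.
def _example_direct_call():
    call = StreamMonitoredSubprocessCall(
        command='./bin/foo {test}',
        end_patterns='["mss /(?P<msg>Assertion .* failed)/"]',
        timeout='30')
    # Returns a populated issue dict when a pattern matched, otherwise a NonIssue
    # wrapping the exit code, captured streams and runtime.
    return call(test='some crashing input')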
|
mayan/apps/common/menus.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 11130667 | from django.utils.translation import ugettext_lazy as _
from mayan.apps.navigation.classes import Menu
from .icons import icon_menu_about, icon_menu_user
menu_about = Menu(
icon=icon_menu_about, label=_('System'), name='about'
)
menu_facet = Menu(label=_('Facet'), name='facet')
menu_list_facet = Menu(label=_('Facet'), name='list facet')
menu_main = Menu(name='main')
menu_multi_item = Menu(name='multi item')
menu_object = Menu(label=_('Actions'), name='object')
menu_related = Menu(label=_('Related'), name='related')
menu_secondary = Menu(label=_('Secondary'), name='secondary')
menu_setup = Menu(name='setup')
menu_return = Menu(label=_('Return'), name='return')
menu_tools = Menu(name='tools')
menu_topbar = Menu(name='topbar')
menu_user = Menu(
icon=icon_menu_user, name='user', label=_('User')
)
|
venv/Lib/site-packages/pyrsistent/typing.py | ajayiagbebaku/NFL-Model | 1,738 | 11130675 | """Helpers for use with type annotation.
Use the empty classes in this module when annotating the types of Pyrsistent
objects, instead of using the actual collection class.
For example,
from pyrsistent import pvector
from pyrsistent.typing import PVector
myvector: PVector[str] = pvector(['a', 'b', 'c'])
"""
from __future__ import absolute_import
try:
from typing import Container
from typing import Hashable
from typing import Generic
from typing import Iterable
from typing import Mapping
from typing import Sequence
from typing import Sized
from typing import TypeVar
__all__ = [
'CheckedPMap',
'CheckedPSet',
'CheckedPVector',
'PBag',
'PDeque',
'PList',
'PMap',
'PSet',
'PVector',
]
T = TypeVar('T')
KT = TypeVar('KT')
VT = TypeVar('VT')
class CheckedPMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class CheckedPSet(Generic[T], Hashable):
pass
class CheckedPVector(Sequence[T], Hashable):
pass
class PBag(Container[T], Iterable[T], Sized, Hashable):
pass
class PDeque(Sequence[T], Hashable):
pass
class PList(Sequence[T], Hashable):
pass
class PMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class PSet(Generic[T], Hashable):
pass
class PVector(Sequence[T], Hashable):
pass
class PVectorEvolver(Generic[T]):
pass
class PMapEvolver(Generic[KT, VT]):
pass
class PSetEvolver(Generic[T]):
pass
except ImportError:
pass
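# Editor's note: a further illustrative annotation in the spirit of the module
# docstring (assumes the pmap constructor from the top-level pyrsistent package):
#
#     from pyrsistent import pmap
#     from pyrsistent.typing import PMap
#
#     word_counts: PMap[str, int] = pmap({'the': 3, 'of': 2})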
|
mayan/apps/documents/views/recently_created_document_views.py | atitaya1412/Mayan-EDMS | 343 | 11130743 | from django.utils.translation import ugettext_lazy as _
from ..icons import icon_document_recently_created_list
from ..models.document_models import RecentlyCreatedDocument
from .document_views import DocumentListView
__all__ = ('RecentCreatedDocumentListView',)
class RecentCreatedDocumentListView(DocumentListView):
def get_document_queryset(self):
return RecentlyCreatedDocument.valid.all()
def get_extra_context(self):
context = super().get_extra_context()
context.update(
{
'no_results_icon': icon_document_recently_created_list,
'no_results_text': _(
'This view will list the latest documents created '
'in the system.'
),
'no_results_title': _(
'There are no recently created documents'
),
'title': _('Recently created'),
}
)
return context
|
utils/rq_queryqueue.py | wranders/crackq | 908 | 11130750 | import datetime
import rq
import sys
from rq import use_connection, Queue
from rq.serializers import JSONSerializer
from redis import Redis
if len(sys.argv) < 2:
    print('Usage: {} <queue-name>'.format(sys.argv[0]))
exit(1)
redis_con = Redis('redis', 6379)
redis_q = Queue(sys.argv[1], connection=redis_con,
serializer=JSONSerializer)
base = rq.registry.BaseRegistry(sys.argv[1],
connection=redis_con, serializer=JSONSerializer)
started = rq.registry.StartedJobRegistry(sys.argv[1],
connection=redis_con)
failed = rq.registry.FailedJobRegistry(sys.argv[1],
connection=redis_con)
comp = rq.registry.FinishedJobRegistry(sys.argv[1],
connection=redis_con)
comp_list = comp.get_job_ids()
cur_list = started.get_job_ids()
failed_list = failed.get_job_ids()
queue = redis_q.job_ids
print('Complete: {}'.format(comp_list))
print('Failed: {}'.format(failed_list))
print('Current: {}'.format(cur_list))
print('Queued: {}'.format(queue))
|
recipes/Python/334916_A_numarray_set_complement/recipe-334916.py | tdiprima/code | 2,023 | 11130801 | #!/usr/bin/env python
import numarray
def complement(ind_arr, n):
"""
Find the complement of the set of indices in ind_arr from
arange(n)
"""
mat = numarray.ones(n)
numarray.put(mat, ind_arr, 0)
out = numarray.nonzero(mat)
return out[0]
if __name__ == "__main__":
orig_arr = numarray.arange(10) + 0.2
indices = numarray.array([1, 3, 5])
comp = complement(indices, len(orig_arr))
comp_arr = numarray.take(orig_arr, comp)
print "orig_arr: ", orig_arr
print "indices: ", indices
print "complement indices: ", comp
print "complement elements: ", comp_arr
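# Editor's note: numarray has long been superseded by NumPy; an equivalent sketch
# of the same boolean-mask technique with modern NumPy (an assumption, not part of
# the original recipe) is:
def complement_np(ind_arr, n):
    import numpy as np
    mask = np.ones(n, dtype=bool)   # start with every index included
    mask[ind_arr] = False           # knock out the given indices
    return np.nonzero(mask)[0]      # indices that remain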
|
Packs/CommonScripts/Scripts/DockerHardeningCheck/DockerHardeningCheck_test.py | diCagri/content | 799 | 11130845 | from DockerHardeningCheck import check_memory, mem_size_to_bytes, check_pids, check_fd_limits, check_non_root, check_cpus
from pytest import skip
import os
def test_check_memory():
assert 'memory cgroup configuration' in check_memory("10m", "cgroup")
def test_mem_size():
assert mem_size_to_bytes("1g") == (1024 * 1024 * 1024)
assert mem_size_to_bytes("512m") == (512 * 1024 * 1024)
def test_pids():
assert check_pids(10)
def test_fd_limits():
assert check_fd_limits(100, 200)
def test_non_root():
assert not check_non_root() # we run tests as non root
def test_check_cpus():
if os.getenv("CI") == "true":
skip("skipping as in CI we run with a single CPU")
return
assert check_cpus(1) # during unit tests we should fail
|
lib/python2.7/site-packages/django/forms/util.py | bop/bauhaus | 285 | 11130847 | from __future__ import unicode_literals
from django.conf import settings
from django.utils.html import format_html, format_html_join
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils import six
import sys
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
The result is passed through 'mark_safe'.
"""
return format_html_join('', ' {0}="{1}"', sorted(attrs.items()))
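# Editor's note (illustrative): flatatt({'class': 'big', 'id': 'main'}) returns
# ' class="big" id="main"': keys are sorted and values are HTML-escaped by
# format_html_join.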
@python_2_unicode_compatible
class ErrorDict(dict):
"""
A collection of errors that knows how to display itself in various formats.
The dictionary keys are the field names, and the values are the errors.
"""
def __str__(self):
return self.as_ul()
def as_ul(self):
if not self: return ''
return format_html('<ul class="errorlist">{0}</ul>',
format_html_join('', '<li>{0}{1}</li>',
((k, force_text(v))
for k, v in self.items())
))
def as_text(self):
return '\n'.join(['* %s\n%s' % (k, '\n'.join([' * %s' % force_text(i) for i in v])) for k, v in self.items()])
@python_2_unicode_compatible
class ErrorList(list):
"""
A collection of errors that knows how to display itself in various formats.
"""
def __str__(self):
return self.as_ul()
def as_ul(self):
if not self: return ''
return format_html('<ul class="errorlist">{0}</ul>',
format_html_join('', '<li>{0}</li>',
((force_text(e),) for e in self)
)
)
def as_text(self):
if not self: return ''
return '\n'.join(['* %s' % force_text(e) for e in self])
def __repr__(self):
return repr([force_text(e) for e in self])
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
"""
When time zone support is enabled, convert naive datetimes
entered in the current time zone to aware datetimes.
"""
if settings.USE_TZ and value is not None and timezone.is_naive(value):
current_timezone = timezone.get_current_timezone()
try:
return timezone.make_aware(value, current_timezone)
except Exception:
message = _(
'%(datetime)s couldn\'t be interpreted '
'in time zone %(current_timezone)s; it '
'may be ambiguous or it may not exist.'
)
params = {'datetime': value, 'current_timezone': current_timezone}
six.reraise(ValidationError, ValidationError(
message,
code='ambiguous_timezone',
params=params,
), sys.exc_info()[2])
return value
def to_current_timezone(value):
"""
When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
"""
if settings.USE_TZ and value is not None and timezone.is_aware(value):
current_timezone = timezone.get_current_timezone()
return timezone.make_naive(value, current_timezone)
return value
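# Editor's note: an illustrative round trip over the two helpers above; it assumes
# settings.USE_TZ = True and an activated current time zone.
def _example_timezone_roundtrip(naive_dt):
    aware = from_current_timezone(naive_dt)   # naive -> aware in the current TZ
    return to_current_timezone(aware)         # back to a naive datetime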
|
create_swag/lm/pretrain_lm.py | gauravkmr/swagaf | 182 | 11130870 | import os
import pandas as pd
import torch
from allennlp.data import Instance
from allennlp.data import Token
from allennlp.data import Vocabulary
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from torch import optim
from create_swag.lm.simple_bilm import SimpleBiLM
from raw_data.events import _postprocess
from pytorch_misc import clip_grad_norm, print_para, time_batch
from create_swag.lm.config import PRETRAIN_TXT
assert os.path.exists('../vocabulary')
vocab = Vocabulary.from_files('../vocabulary')
indexer = ELMoTokenCharactersIndexer()
def batcher(inp_list):
""" batches, asumming everything is padded and tokenized."""
instances = [Instance({'story': TextField([Token(x) for x in ['@@bos@@'] + subl + ['@@eos@@']], token_indexers={
'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True), 'char_encoding': indexer}),
}) for subl in inp_list]
batch = Batch(instances)
batch.index_instances(vocab)
result_dict = batch.as_tensor_dict()['story']
result_dict['story'] = inp_list
return result_dict
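# Editor's note (illustrative): batcher([['the', 'cat', 'sat'], ['a', 'dog']])
# wraps each token list in @@bos@@/@@eos@@ markers, indexes it against the loaded
# vocabulary, and returns padded id tensors for both indexers plus the raw token
# lists under the 'story' key.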
def data_runner(start_point=0, minlength=4):
print("starting at {}".format(start_point))
with open(PRETRAIN_TXT, 'r') as f:
f.seek(start_point)
f.readline() # Clear the partial line
for i, line in enumerate(f):
yield _postprocess(line)
def _sample_a_good_pair(gen, seq_length, min_length=3):
cur_status = []
eos_idxs = [i for i, x in enumerate(cur_status) if x in ('.', '!', '?')]
while len(eos_idxs) < 2:
        cur_status.extend([x for x in next(gen).split(' ') if x != '\n'])
eos_idxs = [i for i, x in enumerate(cur_status) if x in ('.', '!', '?')]
if eos_idxs[1] >= seq_length:
return _sample_a_good_pair(gen, seq_length, min_length=min_length)
elif (eos_idxs[0] < min_length) or (eos_idxs[1] - eos_idxs[0]) < min_length: # Too short
return _sample_a_good_pair(gen, seq_length, min_length=min_length)
return cur_status[:eos_idxs[1] + 1]
def looped_data_runner(batch_size=128, seq_length=50):
offset = 0
TOTAL_BYTES_TRAIN = 4343022454
generators = [data_runner(start_point=TOTAL_BYTES_TRAIN * i // batch_size + offset, minlength=0) for i in
range(batch_size)]
while True:
for g_i, gen in enumerate(generators):
yield _sample_a_good_pair(gen, seq_length=seq_length, min_length=5)
def bucketed_data_runner(batch_size=64, seq_length=50):
length2batch = [[] for i in range(seq_length + 1)]
# Get diverse samples
for batch in looped_data_runner(batch_size=128, seq_length=seq_length):
length2batch[len(batch)].append(batch)
if len(length2batch[len(batch)]) >= batch_size:
# print("Yielding now of size {}".format(len(batch)))
yield batcher(length2batch[len(batch)])
length2batch[len(batch)] = []
# Dataloader
model = SimpleBiLM(vocab=vocab, recurrent_dropout_probability=0.2, embedding_dropout_probability=0.2)
model.cuda()
tr = []
model.train()
for epoch_num in range(2):
if epoch_num == 0:
optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], weight_decay=1e-6, lr=1e-3)
else:
optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], weight_decay=1e-6, lr=1e-4)
print(print_para(model))
for b, (time_per_batch, batch) in enumerate(time_batch(bucketed_data_runner())):
        batch['tokens'] = batch['tokens'].cuda(non_blocking=True)
model_forward = model(batch['tokens'])
losses = {key: model_forward[key] for key in ['forward_loss', 'reverse_loss']}
tr.append(pd.Series({k: v.data[0] for k, v in losses.items()}))
loss = sum(losses.values())
optimizer.zero_grad()
loss.backward()
if b % 100 == 0 and b > 0:
df_cat = pd.concat(tr[-100:], axis=1).mean(1)
print("b{:8d} {:.3f}s/batch, fwd loss {:.3f} rev loss {:.3f} ".format(b, time_per_batch,
df_cat['forward_loss'],
df_cat['reverse_loss']), flush=True)
clip_grad_norm(
[(n, p) for n, p in model.named_parameters() if p.grad is not None],
max_norm=1.0, verbose=b % 1000 == 1, clip=True)
optimizer.step()
if b % 10000 == 0 and b > 0:
torch.save({'state_dict': model.state_dict()}, 'e{}-tbooks-pretrained-ckpt-{}.tar'.format(epoch_num, b))
|
sdk/python/pulumi_aws/appmesh/get_virtual_service.py | chivandikwa/pulumi-aws | 260 | 11130887 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVirtualServiceResult',
'AwaitableGetVirtualServiceResult',
'get_virtual_service',
'get_virtual_service_output',
]
@pulumi.output_type
class GetVirtualServiceResult:
"""
A collection of values returned by getVirtualService.
"""
def __init__(__self__, arn=None, created_date=None, id=None, last_updated_date=None, mesh_name=None, mesh_owner=None, name=None, resource_owner=None, specs=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_updated_date and not isinstance(last_updated_date, str):
raise TypeError("Expected argument 'last_updated_date' to be a str")
pulumi.set(__self__, "last_updated_date", last_updated_date)
if mesh_name and not isinstance(mesh_name, str):
raise TypeError("Expected argument 'mesh_name' to be a str")
pulumi.set(__self__, "mesh_name", mesh_name)
if mesh_owner and not isinstance(mesh_owner, str):
raise TypeError("Expected argument 'mesh_owner' to be a str")
pulumi.set(__self__, "mesh_owner", mesh_owner)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_owner and not isinstance(resource_owner, str):
raise TypeError("Expected argument 'resource_owner' to be a str")
pulumi.set(__self__, "resource_owner", resource_owner)
if specs and not isinstance(specs, list):
raise TypeError("Expected argument 'specs' to be a list")
pulumi.set(__self__, "specs", specs)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
"""
The ARN of the virtual service.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
The creation date of the virtual service.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastUpdatedDate")
def last_updated_date(self) -> str:
"""
The last update date of the virtual service.
"""
return pulumi.get(self, "last_updated_date")
@property
@pulumi.getter(name="meshName")
def mesh_name(self) -> str:
return pulumi.get(self, "mesh_name")
@property
@pulumi.getter(name="meshOwner")
def mesh_owner(self) -> str:
return pulumi.get(self, "mesh_owner")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceOwner")
def resource_owner(self) -> str:
"""
The resource owner's AWS account ID.
"""
return pulumi.get(self, "resource_owner")
@property
@pulumi.getter
def specs(self) -> Sequence['outputs.GetVirtualServiceSpecResult']:
"""
The virtual service specification
"""
return pulumi.get(self, "specs")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
A map of tags.
"""
return pulumi.get(self, "tags")
class AwaitableGetVirtualServiceResult(GetVirtualServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualServiceResult(
arn=self.arn,
created_date=self.created_date,
id=self.id,
last_updated_date=self.last_updated_date,
mesh_name=self.mesh_name,
mesh_owner=self.mesh_owner,
name=self.name,
resource_owner=self.resource_owner,
specs=self.specs,
tags=self.tags)
def get_virtual_service(mesh_name: Optional[str] = None,
mesh_owner: Optional[str] = None,
name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualServiceResult:
"""
The App Mesh Virtual Service data source allows details of an App Mesh Virtual Service to be retrieved by its name, mesh_name, and optionally the mesh_owner.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test = aws.appmesh.get_virtual_service(mesh_name="example-mesh",
name="example.mesh.local")
```
```python
import pulumi
import pulumi_aws as aws
current = aws.get_caller_identity()
test = aws.appmesh.get_virtual_service(name="example.mesh.local",
mesh_name="example-mesh",
mesh_owner=current.account_id)
```
:param str mesh_name: The name of the service mesh in which the virtual service exists.
:param str mesh_owner: The AWS account ID of the service mesh's owner.
:param str name: The name of the virtual service.
:param Mapping[str, str] tags: A map of tags.
"""
__args__ = dict()
__args__['meshName'] = mesh_name
__args__['meshOwner'] = mesh_owner
__args__['name'] = name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:appmesh/getVirtualService:getVirtualService', __args__, opts=opts, typ=GetVirtualServiceResult).value
return AwaitableGetVirtualServiceResult(
arn=__ret__.arn,
created_date=__ret__.created_date,
id=__ret__.id,
last_updated_date=__ret__.last_updated_date,
mesh_name=__ret__.mesh_name,
mesh_owner=__ret__.mesh_owner,
name=__ret__.name,
resource_owner=__ret__.resource_owner,
specs=__ret__.specs,
tags=__ret__.tags)
@_utilities.lift_output_func(get_virtual_service)
def get_virtual_service_output(mesh_name: Optional[pulumi.Input[str]] = None,
mesh_owner: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVirtualServiceResult]:
"""
The App Mesh Virtual Service data source allows details of an App Mesh Virtual Service to be retrieved by its name, mesh_name, and optionally the mesh_owner.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test = aws.appmesh.get_virtual_service(mesh_name="example-mesh",
name="example.mesh.local")
```
```python
import pulumi
import pulumi_aws as aws
current = aws.get_caller_identity()
test = aws.appmesh.get_virtual_service(name="example.mesh.local",
mesh_name="example-mesh",
mesh_owner=current.account_id)
```
:param str mesh_name: The name of the service mesh in which the virtual service exists.
:param str mesh_owner: The AWS account ID of the service mesh's owner.
:param str name: The name of the virtual service.
:param Mapping[str, str] tags: A map of tags.
"""
...
|
tests/test_scan.py | enmathe/ggshield | 794 | 11130917 | from os import getcwd
from unittest.mock import ANY, Mock, patch
from ggshield.config import Cache
from ggshield.dev_scan import cd
from ggshield.scan import Commit
from ggshield.utils import SupportedScanMode
from tests.conftest import _SIMPLE_SECRET
def test_cd_context_manager():
prev = getcwd()
with cd("/tmp"): # nosec
assert getcwd() == "/tmp" # nosec
assert getcwd() == prev
@patch("pygitguardian.GGClient.multi_content_scan")
def test_request_headers(scan_mock: Mock, client):
c = Commit()
c._patch = _SIMPLE_SECRET
mode = SupportedScanMode.PATH
c.scan(
client=client,
cache=Cache(),
matches_ignore={},
all_policies=True,
verbose=False,
mode_header=mode.value,
)
scan_mock.assert_called_with(ANY, {"mode": mode.value})
|
example/migrations/0002_taggeditem.py | sha016/django-rest-framework-json-api | 1,011 | 11130956 | import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("example", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="TaggedItem",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("modified_at", models.DateTimeField(auto_now=True)),
("tag", models.SlugField()),
("object_id", models.PositiveIntegerField()),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
),
),
],
options={
"abstract": False,
},
),
]
|
lib/performance_metrics.py | janged/explainx | 310 | 11130969 | # Import modules
import pandas as pd
import numpy as np
import sklearn
from sklearn import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.metrics import *
from itertools import cycle
import colorlover as cl
import plotly.figure_factory as ff
import plotly.graph_objects as go
class performance_metrics():
def __init__(self, y_test, y_pred):
super(performance_metrics, self).__init__()
self.param = None
self.y_test = y_test
self.y_pred = y_pred
def false_possitives_negatives(self):
f1 = f1_score(self.y_test, self.y_pred, average='micro')
accuracy = accuracy_score(self.y_test, self.y_pred)
cnf_matrix = confusion_matrix(self.y_test, self.y_pred)
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float).sum()
FN = FN.astype(float).sum()
TP = TP.astype(float).sum()
TN = TN.astype(float).sum()
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy for each class
ACC = (TP+TN)/(TP+FP+FN+TN)
        fp_fn_table = pd.DataFrame(dict({'False Positives (%)': [(FPR * 100).round(2)],
'False Negatives (%)': [(FNR * 100).round(2)],
'Accuracy (%)': accuracy.round(2)* 100,
'F1': f1.round(2)}))
fig_metrics_table = ff.create_table(fp_fn_table, height_constant=15)
return fig_metrics_table
def get_matrix(self):
y_test = label_binarize(self.y_test, classes=list(set(self.y_test.flatten())))
n_classes = y_test.shape[1]
conf_matrix = confusion_matrix(y_test.argmax(axis=1), self.y_pred.argmax(axis=1))
return conf_matrix
def plot_roc(self):
y_test = label_binarize(self.y_test, classes=list(set(self.y_test.flatten())))
n_classes = y_test.shape[1]
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], self.y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), self.y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
data = []
trace1 = go.Scatter(x=fpr["micro"], y=tpr["micro"], mode='lines',
line=dict(color='deeppink', width=2, dash='dot'),
name='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
data.append(trace1)
trace2 = go.Scatter(x=fpr["macro"], y=tpr["macro"], mode='lines',
line=dict(color='navy', width=2, dash='dot'),
name='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]))
data.append(trace2)
colors = cycle(['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'])
for i, color in zip(range(n_classes), colors):
trace3 = go.Scatter(x=fpr[i], y=tpr[i],
mode='lines',
line=dict(color=color, width=2),
name='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
data.append(trace3)
trace4 = go.Scatter(x=[0, 1], y=[0, 1],
mode='lines',
line=dict(color='black', width=2, dash='dash'),
showlegend=False)
layout = go.Layout(title='Receiver operating characteristic',
xaxis=dict(title='False Positive Rate'),
yaxis=dict(title='True Positive Rate'),
margin=dict(pad=25))
fig = go.Figure(data=data, layout=layout)
return fig
def plot_pr(self):
colors = cycle(['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A',
'#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'])
# Binarize the output
y_test = label_binarize(self.y_test, classes=list(set(self.y_test.flatten())))
n_classes = y_test.shape[1]
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i], self.y_pred[:, i])
average_precision[i] = average_precision_score(y_test[:, i], self.y_pred[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
self.y_pred.ravel())
average_precision["micro"] = average_precision_score(y_test, self.y_pred,
average="micro")
data = []
trace2 = go.Scatter(x=recall["micro"], y=precision["micro"],
mode='lines',
line=dict(color='gold', width=2),
name='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
data.append(trace2)
for i, color in zip(range(n_classes), colors):
trace3 = go.Scatter(x=recall[i], y=precision[i],
mode='lines',
line=dict(color=color, width=2),
name='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
data.append(trace3)
layout = go.Layout(title='Precision-Recall curve',
xaxis=dict(title='Recall'),
yaxis=dict(title='Precision'),
margin=dict(pad=25))
fig = go.Figure(data=data, layout=layout)
return fig
def plot_pie(self):
cnf_matrix = confusion_matrix(self.y_test, self.y_pred)
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
fp = FP.astype(float).sum()
fn = FN.astype(float).sum()
tp = TP.astype(float).sum()
tn = TN.astype(float).sum()
label_text = ["True Positive", "False Negative","False Positive","True Negative"]
labels = ["TP", "FN", "FP", "TN"]
blue = cl.flipper()['seq']['9']['Blues']
red = cl.flipper()['seq']['9']['Reds']
colors = [blue[4], blue[1], red[1], red[4]]
trace0 = go.Pie(
labels=label_text,
values=[tp, fn, fp, tn],
hoverinfo='label+value+percent',
textinfo='text+value',
text=labels,
sort=False,
marker=dict(
colors=colors))
layout = go.Layout(
title=f'TP, TN, FP, FN',
margin=dict(l=10, r=10, t=60, b=10),
legend=dict(
bgcolor='rgba(255,255,255,0)',
orientation='h'))
data = [trace0]
figure = go.Figure(data=data, layout=layout)
return figure
def plot_matrix(self):
matrix_data = pd.DataFrame(confusion_matrix(self.y_test, self.y_pred))
matrix_data = matrix_data.astype('float') / matrix_data.sum(axis=1)[:, np.newaxis]*100
z = np.array(matrix_data).round(2)
x = list(matrix_data.columns)
y = list(matrix_data.index)
# change each element of z to type string for annotations
z_text = [[str(y) for y in x] for x in z]
# set up figure
fig_matrix = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text, colorscale='Viridis')
# add title
fig_matrix.update_layout(title_text='<i><b>Confusion matrix (%)</b></i>',
xaxis = dict(title='Predicted Label'),
yaxis = dict(title='True Label'))
# adjust margins to make room for yaxis title
fig_matrix.update_layout(margin=dict(t=50, l=200))
# add colorbar
fig_matrix['data'][0]['showscale'] = True
return fig_matrix
def metrics_table(self):
# AUC score
lb = preprocessing.LabelBinarizer()
lb.fit(self.y_test)
y_test_roc = lb.transform(self.y_test)
y_pred_roc = lb.transform(self.y_pred)
auc = roc_auc_score(y_test_roc, y_pred_roc, average="macro").round(2)
# MAE
mae = mean_absolute_error(self.y_test, self.y_pred).round(2)
# MSE
mse = mean_squared_error(self.y_test, self.y_pred).round(2)
# Accuracy
accuracy = accuracy_score(self.y_test, self.y_pred).round(2)
# matthews_corrcoef
matthews_corrcoe = matthews_corrcoef(self.y_test, self.y_pred).round(2)
# Make DataFrames
metric = ['Accuracy','Area Under Curve','MAE','MSE','Matthews Corrcoef']
values = [accuracy, auc, mae, mse,matthews_corrcoe]
metrics_tab = pd.DataFrame({'metric': metric, 'values': values})
fig_metrics_table = ff.create_table(metrics_tab, height_constant=15)
return fig_metrics_table
def performance_metrics_regression(self):
maxerror = max_error(self.y_test, self.y_pred).round(2)
mae = mean_absolute_error(self.y_test, self.y_pred).round(2)
mse = mean_squared_error(self.y_test, self.y_pred).round(2)
r2 = r2_score(self.y_test, self.y_pred).round(2)
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
mape = mean_absolute_percentage_error(self.y_test, self.y_pred).round(2)
# Make DataFrames
metric = ['Max Error','R squared','MAE','MSE','MAPE']
values = [maxerror, r2, mae, mse, mape]
metrics_dataframe = pd.DataFrame({'metric': metric, 'values': values})
metrics_dataframe = ff.create_table(metrics_dataframe, height_constant=15)
return metrics_dataframe
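    # Editor's note: an illustrative way to drive the helpers in this class (an
    # assumption about intended usage, not part of the original module):
    #
    #     pm = performance_metrics(y_test, y_pred)
    #     pm.metrics_table().show()      # accuracy / AUC / MAE / MSE table
    #     pm.plot_matrix().show()        # confusion matrix, row-normalised %
    #     pm.plot_pie().show()           # TP / TN / FP / FN breakdown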
def actuals(self):
cnf_matrix = confusion_matrix(self.y_test, self.y_pred)
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
fp = FP.astype(float).sum().round()
fn = FN.astype(float).sum().round()
tp = TP.astype(float).sum().round()
tn = TN.astype(float).sum().round()
total = tn + fn + fp + tp
predicted_no = [tn,fn]
predicted_yes = [fp,tp]
data = {'Predicted No':predicted_no, 'Predicted Yes':predicted_yes}
# Creates pandas DataFrame.
df = pd.DataFrame(data, index =['No', 'Yes'])
df['Total'] = df.sum(axis=1)
df.loc['Total',:]= df.sum(axis=0)
df.reset_index(inplace=True)
df.rename({"index" : 'Actual'}, axis=1, inplace=True)
actuals_table = ff.create_table(df, height_constant=15)
        return actuals_table
 |
utils/regex.py | goztrk/django-htk | 206 | 11130975 | # Python Standard Library Imports
import re
class Re(object):
def __init__(self):
self.last_match = None
def match(self, pattern, text):
if type(pattern).__name__ == 'SRE_Pattern':
self.last_match = pattern.match(text)
else:
self.last_match = re.match(pattern, text)
return self.last_match
def search(self, pattern, text):
if type(pattern).__name__ == 'SRE_Pattern':
self.last_match = pattern.search(text)
else:
self.last_match = re.search(pattern, text)
return self.last_match
def sub(self, pattern, repl, string, count=0, flags=0):
def frepl(matchobj):
self.last_match = matchobj
return repl
if type(pattern).__name__ == 'SRE_Pattern':
result, n = pattern.subn(frepl, string, count=count)
else:
result, n = re.subn(pattern, frepl, string, count=count, flags=flags)
if n == 0:
self.last_match = None
return result
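# Editor's note: an illustrative sketch (not part of the original module) of the
# pattern this wrapper enables: branching on a match while keeping the match
# object reachable through `last_match`.
def _example_usage(text):
    gre = Re()
    if gre.match(r'(?P<major>\d+)\.(?P<minor>\d+)', text):
        return ('version', gre.last_match.group('major'), gre.last_match.group('minor'))
    elif gre.search(r'build (\d+)', text):
        return ('build', gre.last_match.group(1))
    return None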
|
kitsune/upload/tests/__init__.py | AndrewDVXI/kitsune | 929 | 11130980 | from django.conf import settings
from django.core.files import File
import factory
from nose.tools import eq_, raises
from kitsune.questions.tests import QuestionFactory
from kitsune.sumo.tests import TestCase
from kitsune.upload.models import ImageAttachment
from kitsune.upload.storage import RenameFileStorage
from kitsune.upload.utils import create_imageattachment, check_file_size, FileTooLargeError
from kitsune.users.tests import UserFactory
class ImageAttachmentFactory(factory.DjangoModelFactory):
class Meta:
model = ImageAttachment
creator = factory.SubFactory(UserFactory)
content_object = factory.SubFactory(QuestionFactory)
file = factory.django.FileField()
def check_file_info(file_info, name, width, height, delete_url, url, thumbnail_url):
eq_(name, file_info["name"])
eq_(width, file_info["width"])
eq_(height, file_info["height"])
eq_(delete_url, file_info["delete_url"])
eq_(url, file_info["url"])
eq_(thumbnail_url, file_info["thumbnail_url"])
def get_file_name(name):
storage = RenameFileStorage()
return storage.get_available_name(name)
class CheckFileSizeTestCase(TestCase):
"""Tests for check_file_size"""
def test_check_file_size_under(self):
"""No exception should be raised"""
with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
up_file = File(f)
check_file_size(up_file, settings.IMAGE_MAX_FILESIZE)
@raises(FileTooLargeError)
def test_check_file_size_over(self):
"""FileTooLargeError should be raised"""
with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
up_file = File(f)
# This should raise
check_file_size(up_file, 0)
class CreateImageAttachmentTestCase(TestCase):
def setUp(self):
super(CreateImageAttachmentTestCase, self).setUp()
self.user = UserFactory()
self.obj = QuestionFactory()
def tearDown(self):
ImageAttachment.objects.all().delete()
super(CreateImageAttachmentTestCase, self).tearDown()
def test_create_imageattachment(self):
"""
An image attachment is created from an uploaded file.
Verifies all appropriate fields are correctly set.
"""
with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
up_file = File(f)
file_info = create_imageattachment({"image": up_file}, self.user, self.obj)
image = ImageAttachment.objects.all()[0]
check_file_info(
file_info,
name="test.png",
width=90,
height=120,
delete_url=image.get_delete_url(),
url=image.get_absolute_url(),
thumbnail_url=image.thumbnail.url,
)
class FileNameTestCase(TestCase):
def _match_file_name(self, name, name_end):
assert name.endswith(name_end), '"%s" does not end with "%s"' % (name, name_end)
def test_empty_file_name(self):
self._match_file_name("", "")
def test_empty_file_name_with_extension(self):
self._match_file_name(get_file_name(".wtf"), "3f8242")
def test_ascii(self):
self._match_file_name(get_file_name("some ascii.jpg"), "5959e0.jpg")
def test_ascii_dir(self):
self._match_file_name(get_file_name("dir1/dir2/some ascii.jpg"), "5959e0.jpg")
def test_low_unicode(self):
self._match_file_name(get_file_name("157d9383e6aeba7180378fd8c1d46f80.gif"), "bdaf1a.gif")
def test_high_unicode(self):
self._match_file_name(get_file_name("\u6709\u52b9.jpeg"), "ce1518.jpeg")
def test_full_mixed(self):
self._match_file_name(
get_file_name("123\xe5\xe5\xee\xe9\xf8\xe7\u6709\u52b9.png"), "686c11.png"
)
|
immutables/map.py | alvistack/MagicStack-immutables | 934 | 11130987 | import collections.abc
import itertools
import reprlib
import sys
__all__ = ('Map',)
# Thread-safe counter.
_mut_id = itertools.count(1).__next__
# Python version of _map.c. The topmost comment there explains
# all datastructures and algorithms.
# The code here follows C code closely on purpose to make
# debugging and testing easier.
def map_hash(o):
x = hash(o)
if sys.hash_info.width > 32:
return (x & 0xffffffff) ^ ((x >> 32) & 0xffffffff)
else:
return x
def map_mask(hash, shift):
return (hash >> shift) & 0x01f
def map_bitpos(hash, shift):
return 1 << map_mask(hash, shift)
def map_bitcount(v):
v = v - ((v >> 1) & 0x55555555)
v = (v & 0x33333333) + ((v >> 2) & 0x33333333)
v = (v & 0x0F0F0F0F) + ((v >> 4) & 0x0F0F0F0F)
v = v + (v >> 8)
v = (v + (v >> 16)) & 0x3F
return v
def map_bitindex(bitmap, bit):
return map_bitcount(bitmap & (bit - 1))
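# Editor's note: a small worked example of the helpers above (illustrative only).
# For a hash whose low bits are 0b1011010_00111 and shift=5, the selected 5-bit
# slice is 0b11010 (26), so map_bitpos sets bit 26 of a node's bitmap;
# map_bitindex then counts the lower set bits to locate the slot in the node's
# packed array.
def _example_bit_helpers():
    h, shift = 0b101101000111, 5
    assert map_mask(h, shift) == 26               # bits 5..9 of the hash
    assert map_bitpos(h, shift) == 1 << 26
    assert map_bitindex(0b1010101, 1 << 4) == 2   # two set bits below bit 4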
W_EMPTY, W_NEWNODE, W_NOT_FOUND = range(3)
void = object()
class _Unhashable:
__slots__ = ()
__hash__ = None
_NULL = _Unhashable()
del _Unhashable
class BitmapNode:
def __init__(self, size, bitmap, array, mutid):
self.size = size
self.bitmap = bitmap
assert isinstance(array, list) and len(array) == size
self.array = array
self.mutid = mutid
def clone(self, mutid):
return BitmapNode(self.size, self.bitmap, self.array.copy(), mutid)
def assoc(self, shift, hash, key, val, mutid):
bit = map_bitpos(hash, shift)
idx = map_bitindex(self.bitmap, bit)
if self.bitmap & bit:
key_idx = 2 * idx
val_idx = key_idx + 1
key_or_null = self.array[key_idx]
val_or_node = self.array[val_idx]
if key_or_null is _NULL:
sub_node, added = val_or_node.assoc(
shift + 5, hash, key, val, mutid)
if val_or_node is sub_node:
return self, added
if mutid and mutid == self.mutid:
self.array[val_idx] = sub_node
return self, added
else:
ret = self.clone(mutid)
ret.array[val_idx] = sub_node
return ret, added
if key == key_or_null:
if val is val_or_node:
return self, False
if mutid and mutid == self.mutid:
self.array[val_idx] = val
return self, False
else:
ret = self.clone(mutid)
ret.array[val_idx] = val
return ret, False
existing_key_hash = map_hash(key_or_null)
if existing_key_hash == hash:
sub_node = CollisionNode(
4, hash, [key_or_null, val_or_node, key, val], mutid)
else:
sub_node = BitmapNode(0, 0, [], mutid)
sub_node, _ = sub_node.assoc(
shift + 5, existing_key_hash,
key_or_null, val_or_node,
mutid)
sub_node, _ = sub_node.assoc(
shift + 5, hash, key, val,
mutid)
if mutid and mutid == self.mutid:
self.array[key_idx] = _NULL
self.array[val_idx] = sub_node
return self, True
else:
ret = self.clone(mutid)
ret.array[key_idx] = _NULL
ret.array[val_idx] = sub_node
return ret, True
else:
key_idx = 2 * idx
val_idx = key_idx + 1
n = map_bitcount(self.bitmap)
new_array = self.array[:key_idx]
new_array.append(key)
new_array.append(val)
new_array.extend(self.array[key_idx:])
if mutid and mutid == self.mutid:
self.size = 2 * (n + 1)
self.bitmap |= bit
self.array = new_array
return self, True
else:
return BitmapNode(
2 * (n + 1), self.bitmap | bit, new_array, mutid), True
def find(self, shift, hash, key):
bit = map_bitpos(hash, shift)
if not (self.bitmap & bit):
raise KeyError
idx = map_bitindex(self.bitmap, bit)
key_idx = idx * 2
val_idx = key_idx + 1
key_or_null = self.array[key_idx]
val_or_node = self.array[val_idx]
if key_or_null is _NULL:
return val_or_node.find(shift + 5, hash, key)
if key == key_or_null:
return val_or_node
raise KeyError(key)
def without(self, shift, hash, key, mutid):
bit = map_bitpos(hash, shift)
if not (self.bitmap & bit):
return W_NOT_FOUND, None
idx = map_bitindex(self.bitmap, bit)
key_idx = 2 * idx
val_idx = key_idx + 1
key_or_null = self.array[key_idx]
val_or_node = self.array[val_idx]
if key_or_null is _NULL:
res, sub_node = val_or_node.without(shift + 5, hash, key, mutid)
if res is W_EMPTY:
raise RuntimeError('unreachable code') # pragma: no cover
elif res is W_NEWNODE:
if (type(sub_node) is BitmapNode and
sub_node.size == 2 and
sub_node.array[0] is not _NULL):
if mutid and mutid == self.mutid:
self.array[key_idx] = sub_node.array[0]
self.array[val_idx] = sub_node.array[1]
return W_NEWNODE, self
else:
clone = self.clone(mutid)
clone.array[key_idx] = sub_node.array[0]
clone.array[val_idx] = sub_node.array[1]
return W_NEWNODE, clone
if mutid and mutid == self.mutid:
self.array[val_idx] = sub_node
return W_NEWNODE, self
else:
clone = self.clone(mutid)
clone.array[val_idx] = sub_node
return W_NEWNODE, clone
else:
assert sub_node is None
return res, None
else:
if key == key_or_null:
if self.size == 2:
return W_EMPTY, None
new_array = self.array[:key_idx]
new_array.extend(self.array[val_idx + 1:])
if mutid and mutid == self.mutid:
self.size -= 2
self.bitmap &= ~bit
self.array = new_array
return W_NEWNODE, self
else:
new_node = BitmapNode(
self.size - 2, self.bitmap & ~bit, new_array, mutid)
return W_NEWNODE, new_node
else:
return W_NOT_FOUND, None
def keys(self):
for i in range(0, self.size, 2):
key_or_null = self.array[i]
if key_or_null is _NULL:
val_or_node = self.array[i + 1]
yield from val_or_node.keys()
else:
yield key_or_null
def values(self):
for i in range(0, self.size, 2):
key_or_null = self.array[i]
val_or_node = self.array[i + 1]
if key_or_null is _NULL:
yield from val_or_node.values()
else:
yield val_or_node
def items(self):
for i in range(0, self.size, 2):
key_or_null = self.array[i]
val_or_node = self.array[i + 1]
if key_or_null is _NULL:
yield from val_or_node.items()
else:
yield key_or_null, val_or_node
def dump(self, buf, level): # pragma: no cover
buf.append(
' ' * (level + 1) +
'BitmapNode(size={} count={} bitmap={} id={:0x}):'.format(
self.size, self.size / 2, bin(self.bitmap), id(self)))
for i in range(0, self.size, 2):
key_or_null = self.array[i]
val_or_node = self.array[i + 1]
pad = ' ' * (level + 2)
if key_or_null is _NULL:
buf.append(pad + 'NULL:')
val_or_node.dump(buf, level + 2)
else:
buf.append(pad + '{!r}: {!r}'.format(key_or_null, val_or_node))
class CollisionNode:
def __init__(self, size, hash, array, mutid):
self.size = size
self.hash = hash
self.array = array
self.mutid = mutid
def find_index(self, key):
for i in range(0, self.size, 2):
if self.array[i] == key:
return i
return -1
def find(self, shift, hash, key):
for i in range(0, self.size, 2):
if self.array[i] == key:
return self.array[i + 1]
raise KeyError(key)
def assoc(self, shift, hash, key, val, mutid):
if hash == self.hash:
key_idx = self.find_index(key)
if key_idx == -1:
new_array = self.array.copy()
new_array.append(key)
new_array.append(val)
if mutid and mutid == self.mutid:
self.size += 2
self.array = new_array
return self, True
else:
new_node = CollisionNode(
self.size + 2, hash, new_array, mutid)
return new_node, True
val_idx = key_idx + 1
if self.array[val_idx] is val:
return self, False
if mutid and mutid == self.mutid:
self.array[val_idx] = val
return self, False
else:
new_array = self.array.copy()
new_array[val_idx] = val
return CollisionNode(self.size, hash, new_array, mutid), False
else:
new_node = BitmapNode(
2, map_bitpos(self.hash, shift), [_NULL, self], mutid)
return new_node.assoc(shift, hash, key, val, mutid)
def without(self, shift, hash, key, mutid):
if hash != self.hash:
return W_NOT_FOUND, None
key_idx = self.find_index(key)
if key_idx == -1:
return W_NOT_FOUND, None
new_size = self.size - 2
if new_size == 0:
# Shouldn't be ever reachable
return W_EMPTY, None # pragma: no cover
if new_size == 2:
if key_idx == 0:
new_array = [self.array[2], self.array[3]]
else:
assert key_idx == 2
new_array = [self.array[0], self.array[1]]
new_node = BitmapNode(
2, map_bitpos(hash, shift), new_array, mutid)
return W_NEWNODE, new_node
new_array = self.array[:key_idx]
new_array.extend(self.array[key_idx + 2:])
if mutid and mutid == self.mutid:
self.array = new_array
self.size -= 2
return W_NEWNODE, self
else:
new_node = CollisionNode(
self.size - 2, self.hash, new_array, mutid)
return W_NEWNODE, new_node
def keys(self):
for i in range(0, self.size, 2):
yield self.array[i]
def values(self):
for i in range(1, self.size, 2):
yield self.array[i]
def items(self):
for i in range(0, self.size, 2):
yield self.array[i], self.array[i + 1]
def dump(self, buf, level): # pragma: no cover
pad = ' ' * (level + 1)
buf.append(
pad + 'CollisionNode(size={} id={:0x}):'.format(
self.size, id(self)))
pad = ' ' * (level + 2)
for i in range(0, self.size, 2):
key = self.array[i]
val = self.array[i + 1]
buf.append('{}{!r}: {!r}'.format(pad, key, val))
class MapKeys:
def __init__(self, c, m):
self.__count = c
self.__root = m
def __len__(self):
return self.__count
def __iter__(self):
return iter(self.__root.keys())
class MapValues:
def __init__(self, c, m):
self.__count = c
self.__root = m
def __len__(self):
return self.__count
def __iter__(self):
return iter(self.__root.values())
class MapItems:
def __init__(self, c, m):
self.__count = c
self.__root = m
def __len__(self):
return self.__count
def __iter__(self):
return iter(self.__root.items())
class Map:
def __init__(self, *args, **kw):
if not args:
col = None
elif len(args) == 1:
col = args[0]
else:
raise TypeError(
"immutables.Map expected at most 1 arguments, "
"got {}".format(len(args))
)
self.__count = 0
self.__root = BitmapNode(0, 0, [], 0)
self.__hash = -1
if isinstance(col, Map):
self.__count = col.__count
self.__root = col.__root
self.__hash = col.__hash
col = None
elif isinstance(col, MapMutation):
raise TypeError('cannot create Maps from MapMutations')
if col or kw:
init = self.update(col, **kw)
self.__count = init.__count
self.__root = init.__root
@classmethod
def _new(cls, count, root):
m = Map.__new__(Map)
m.__count = count
m.__root = root
m.__hash = -1
return m
def __reduce__(self):
return (type(self), (dict(self.items()),))
def __len__(self):
return self.__count
def __eq__(self, other):
if not isinstance(other, Map):
return NotImplemented
if len(self) != len(other):
return False
for key, val in self.__root.items():
try:
oval = other.__root.find(0, map_hash(key), key)
except KeyError:
return False
else:
if oval != val:
return False
return True
def update(self, *args, **kw):
if not args:
col = None
elif len(args) == 1:
col = args[0]
else:
raise TypeError(
"update expected at most 1 arguments, got {}".format(len(args))
)
it = None
if col is not None:
if hasattr(col, 'items'):
it = iter(col.items())
else:
it = iter(col)
if it is not None:
if kw:
it = iter(itertools.chain(it, kw.items()))
else:
if kw:
it = iter(kw.items())
if it is None:
return self
mutid = _mut_id()
root = self.__root
count = self.__count
i = 0
while True:
try:
tup = next(it)
except StopIteration:
break
try:
tup = tuple(tup)
except TypeError:
raise TypeError(
'cannot convert map update '
'sequence element #{} to a sequence'.format(i)) from None
key, val, *r = tup
if r:
raise ValueError(
'map update sequence element #{} has length '
'{}; 2 is required'.format(i, len(r) + 2))
root, added = root.assoc(0, map_hash(key), key, val, mutid)
if added:
count += 1
i += 1
return Map._new(count, root)
def mutate(self):
return MapMutation(self.__count, self.__root)
def set(self, key, val):
new_count = self.__count
new_root, added = self.__root.assoc(0, map_hash(key), key, val, 0)
if new_root is self.__root:
assert not added
return self
if added:
new_count += 1
return Map._new(new_count, new_root)
def delete(self, key):
res, node = self.__root.without(0, map_hash(key), key, 0)
if res is W_EMPTY:
return Map()
elif res is W_NOT_FOUND:
raise KeyError(key)
else:
return Map._new(self.__count - 1, node)
def get(self, key, default=None):
try:
return self.__root.find(0, map_hash(key), key)
except KeyError:
return default
def __getitem__(self, key):
return self.__root.find(0, map_hash(key), key)
def __contains__(self, key):
try:
self.__root.find(0, map_hash(key), key)
except KeyError:
return False
else:
return True
def __iter__(self):
yield from self.__root.keys()
def keys(self):
return MapKeys(self.__count, self.__root)
def values(self):
return MapValues(self.__count, self.__root)
def items(self):
return MapItems(self.__count, self.__root)
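    # Order-independent hash over the map's items; the constants match
    # CPython's frozenset hashing scheme so that equal Maps hash equally.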
def __hash__(self):
if self.__hash != -1:
return self.__hash
MAX = sys.maxsize
MASK = 2 * MAX + 1
h = 1927868237 * (self.__count * 2 + 1)
h &= MASK
for key, value in self.__root.items():
hx = hash(key)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
hx = hash(value)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1 # pragma: no cover
if h == -1:
h = 590923713 # pragma: no cover
self.__hash = h
return h
@reprlib.recursive_repr("{...}")
def __repr__(self):
items = []
for key, val in self.items():
items.append("{!r}: {!r}".format(key, val))
return 'immutables.Map({{{}}})'.format(', '.join(items))
def __dump__(self): # pragma: no cover
buf = []
self.__root.dump(buf, 0)
return '\n'.join(buf)
def __class_getitem__(cls, item):
return cls
class MapMutation:
def __init__(self, count, root):
self.__count = count
self.__root = root
self.__mutid = _mut_id()
def set(self, key, val):
self[key] = val
def __enter__(self):
return self
def __exit__(self, *exc):
self.finish()
return False
def __iter__(self):
raise TypeError('{} is not iterable'.format(type(self)))
def __delitem__(self, key):
if self.__mutid == 0:
raise ValueError('mutation {!r} has been finished'.format(self))
res, new_root = self.__root.without(
0, map_hash(key), key, self.__mutid)
if res is W_EMPTY:
self.__count = 0
self.__root = BitmapNode(0, 0, [], self.__mutid)
elif res is W_NOT_FOUND:
raise KeyError(key)
else:
self.__root = new_root
self.__count -= 1
def __setitem__(self, key, val):
if self.__mutid == 0:
raise ValueError('mutation {!r} has been finished'.format(self))
self.__root, added = self.__root.assoc(
0, map_hash(key), key, val, self.__mutid)
if added:
self.__count += 1
def pop(self, key, *args):
if self.__mutid == 0:
raise ValueError('mutation {!r} has been finished'.format(self))
if len(args) > 1:
raise TypeError(
'pop() accepts 1 to 2 positional arguments, '
'got {}'.format(len(args) + 1))
elif len(args) == 1:
default = args[0]
else:
default = void
val = self.get(key, default)
try:
del self[key]
except KeyError:
if val is void:
raise
return val
else:
assert val is not void
return val
def get(self, key, default=None):
try:
return self.__root.find(0, map_hash(key), key)
except KeyError:
return default
def __getitem__(self, key):
return self.__root.find(0, map_hash(key), key)
def __contains__(self, key):
try:
self.__root.find(0, map_hash(key), key)
except KeyError:
return False
else:
return True
def update(self, *args, **kw):
if not args:
col = None
elif len(args) == 1:
col = args[0]
else:
raise TypeError(
"update expected at most 1 arguments, got {}".format(len(args))
)
if self.__mutid == 0:
raise ValueError('mutation {!r} has been finished'.format(self))
it = None
if col is not None:
if hasattr(col, 'items'):
it = iter(col.items())
else:
it = iter(col)
if it is not None:
if kw:
it = iter(itertools.chain(it, kw.items()))
else:
if kw:
it = iter(kw.items())
if it is None:
return
root = self.__root
count = self.__count
i = 0
while True:
try:
tup = next(it)
except StopIteration:
break
try:
tup = tuple(tup)
except TypeError:
raise TypeError(
'cannot convert map update '
'sequence element #{} to a sequence'.format(i)) from None
key, val, *r = tup
if r:
raise ValueError(
'map update sequence element #{} has length '
'{}; 2 is required'.format(i, len(r) + 2))
root, added = root.assoc(0, map_hash(key), key, val, self.__mutid)
if added:
count += 1
i += 1
self.__root = root
self.__count = count
def finish(self):
self.__mutid = 0
return Map._new(self.__count, self.__root)
@reprlib.recursive_repr("{...}")
def __repr__(self):
items = []
for key, val in self.__root.items():
items.append("{!r}: {!r}".format(key, val))
return 'immutables.MapMutation({{{}}})'.format(', '.join(items))
def __len__(self):
return self.__count
def __reduce__(self):
raise TypeError("can't pickle {} objects".format(type(self).__name__))
def __hash__(self):
raise TypeError('unhashable type: {}'.format(type(self).__name__))
def __eq__(self, other):
if not isinstance(other, MapMutation):
return NotImplemented
if len(self) != len(other):
return False
for key, val in self.__root.items():
try:
oval = other.__root.find(0, map_hash(key), key)
except KeyError:
return False
else:
if oval != val:
return False
return True
collections.abc.Mapping.register(Map)
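# Minimal usage sketch (illustrative only, not part of the original module):
if __name__ == '__main__':  # pragma: no cover
    m = Map(a=1, b=2)
    m2 = m.set('c', 3)
    assert 'c' not in m and m2['c'] == 3
    with m2.mutate() as mm:
        mm['d'] = 4
        del mm['a']
        m3 = mm.finish()
    assert dict(m3.items()) == {'b': 2, 'c': 3, 'd': 4}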
|
pylayers/antprop/tests/test_air.py | usmanwardag/pylayers | 143 | 11131050 |
from pylayers.simul.link import *
import pdb
DL0 = DLink(L='testair0.lay')
DL1 = DLink(L='testair1.lay')
DL0.a = np.array([1,3,1])
DL0.b = np.array([8,1,2.5])
DL1.a = np.array([1,3,1])
DL1.b = np.array([8,1,2.5])
DL0.eval(force=1,cutoff=1,threshold=0.1)
DL1.eval(force=1,cutoff=1,threshold=0.1)
DL0.plt_cir()
DL1.plt_cir()
#pdb.set_trace()
#B = np.array([[0,1,0],[0,0,1],[1,0,0]])
#
## Without air wall
## Bo0 3 x 3
#print("without air wall")
Bo0_0 = DL0.R[1]['Bo0'][:,:,0]
Bi_0 = DL0.R[1]['Bi'][:,:,:,0]
Bo_0 = DL0.R[1]['Bo'][:,:,:,0]
BiN_0 = DL0.R[1]['BiN'][:,:,0]
#print "Bo0"
#print Bo0
#print DL0.R[1]['B'][:,:,0,0]
##"print np.dot(Bo0[:,1:].T,Bi[:,1:,0])
##print np.dot(Bi[:,1:,0].T,Bo0[:,1:])
## get the indices of interactions
#linter = DL0.R[1]['rays'][0]
#for k in range(Bi.shape[2]):
# print "Bi"+str(k)
# print " ",Bi[:,:,k]
# print "Interaction"+str(k)
#
# print " ",DL0.R.I.I[0,linter[k],:,:]
# print "Bo"+str(k)
# print " ",Bo[:,:,k]
#print DL0.R[1]['B'][:,:,1,0]
##print np.dot(Bo[:,1:,0].T,BiN[:,1:])
##print np.dot(BiN[:,1:].T,Bo[:,1:,0])
#print "BiN"
#print BiN
#print ("\n")
#print("with air wall")
## With air wall
Bo0_1 = DL1.R[1]['Bo0'][:,:,0]
Bi_1 = DL1.R[1]['Bi'][:,:,:,0]
Bo_1 = DL1.R[1]['Bo'][:,:,:,0]
BiN_1 = DL1.R[1]['BiN'][:,:,0]
#print "Bo0"
#print Bo0
#print DL1.R[2]['B'][:,:,0,0]
#print np.dot(Bo0[:,1:].T,Bi[:,1:,0])
#print np.dot(Bi[:,1:,0].T,Bo0[:,1:])
## get the indices of interactions
#linter = DL1.R[2]['rays']
#for k in range(Bi.shape[2]):
# if k>0:
# print DL1.R[2]['B'][:,:,k,0]
# print "Bi"+str(k)
# print " ",Bi[:,:,k]
# print "Interaction"+str(k)
#
# print " ",DL1.R.I.I[0,linter[0,k],:,:]
# print "Bo"+str(k)
# print " ",Bo[:,:,k]
#print k
#print DL1.R[2]['B'][:,:,2,0]
##print np.dot(Bo[:,1:,0].T,BiN[:,1:])
##print np.dot(BiN[:,1:].T,Bo[:,1:,0])
#print "BiN"
#print BiN
##-------------------
## The problem
N0 = Bo0_0
N1 = Bi_0[:,:,0]
A = np.dot(N1.T,N0)
print(A)
M0 = Bo0_1
I = np.eye(3)
M1 = Bi_1[:,:,0]
M2 = Bo_1[:,:,0]
M3 = Bi_1[:,:,1]
T1 = np.dot(M1.T,M0)
T2 = np.dot(I,T1)
T3 = np.dot(M2,T2)
T4 = np.dot(M3.T,T3)
print(T4)
#print np.dot(M2.T,M1)
A1 = DL0.R[1]['B'][:,:,0,0]
A2 = DL0.R[1]['B'][:,:,1,0]
R0 = DL0.R.I.I[0,1,:,:]
res0 = np.dot(A2,np.dot(R0,A1))
assert np.allclose(N0,M0)
assert np.allclose(N1,M3)
B1 = DL1.R[2]['B'][:,:,0,0]
B2 = DL1.R[2]['B'][:,:,1,0]
B3 = DL1.R[2]['B'][:,:,2,0]
R1 = DL1.R.I.I[0,2,:,:]
res1 = np.dot(B3,np.dot(R1,np.dot(B2,B1)))
print(res0 - res1)
#U = Bi_1[:,:,0]
#V = Bo_1[:,:,0]
#B = np.dot(Bi_1[:,:,0].T,Bo0_1)
#C = np.dot(Bo_1[:,:,0].T,B.T)
#print np.dot(U,V.T)
#print C
#print "Bi",A
#print "Bo",B
#print "BiBo",np.dot(A,B)
#
##for k in DL1.R:
## print "groupe d'interactions ",k
## Bo0 = DL1.R[k]['Bo0']
## Bi = DL1.R[k]['Bi']
## Bo = DL1.R[k]['Bo']
## BiN = DL1.R[k]['BiN']
## nray = Bi.shape[3]
## for ir in range(nray):
## for il in range(k):
## mBi = Bi[:,:,il,ir]
## mBo = Bo[:,:,il,ir]
## print np.dot(mBo[:,1:].T,mBi[:,1:])
## #dmBi = np.linalg.det(mBi)
## #dmBo = np.linalg.det(mBo)
## #print dmBi,dmBo
## #print np.linalg.det(np.dot(Bi[:,:,il,ir],B.T))
## #print np.linalg.det(np.dot(Bo[:,:,il,ir],B.T))
##
###tind = []
###for tau in DL0.H.taud:
### if tau in DL1.H.taud:
### u = np.where(DL1.H.taud==tau)[0]
### v = np.where(DL0.H.taud==tau)[0]
### try:
### diff = np.abs(DL1.H.y[u]-DL0.H.y[v]).squeeze()
### if diff > 1e-10:
### tind.append(zip(u,v))
### except:
### pass
|
wandb/vendor/prompt_toolkit/styles/from_pygments.py | dreamflasher/client | 6,989 | 11131052 | """
Adaptor for building prompt_toolkit styles, starting from a Pygments style.
Usage::
from pygments.styles.tango import TangoStyle
style = style_from_pygments(pygments_style_cls=TangoStyle)
"""
from __future__ import unicode_literals
from .base import Style
from .from_dict import style_from_dict
__all__ = (
'PygmentsStyle',
'style_from_pygments',
)
# Following imports are only needed when a ``PygmentsStyle`` class is used.
try:
from pygments.style import Style as pygments_Style
from pygments.styles.default import DefaultStyle as pygments_DefaultStyle
except ImportError:
pygments_Style = None
pygments_DefaultStyle = None
def style_from_pygments(style_cls=pygments_DefaultStyle,
style_dict=None,
include_defaults=True):
"""
Shortcut to create a :class:`.Style` instance from a Pygments style class
and a style dictionary.
Example::
from prompt_toolkit.styles.from_pygments import style_from_pygments
from pygments.styles import get_style_by_name
style = style_from_pygments(get_style_by_name('monokai'))
:param style_cls: Pygments style class to start from.
:param style_dict: Dictionary for this style. `{Token: style}`.
:param include_defaults: (`bool`) Include prompt_toolkit extensions.
"""
assert style_dict is None or isinstance(style_dict, dict)
assert style_cls is None or issubclass(style_cls, pygments_Style)
styles_dict = {}
if style_cls is not None:
styles_dict.update(style_cls.styles)
if style_dict is not None:
styles_dict.update(style_dict)
return style_from_dict(styles_dict, include_defaults=include_defaults)
class PygmentsStyle(Style):
" Deprecated. "
def __new__(cls, pygments_style_cls):
assert issubclass(pygments_style_cls, pygments_Style)
return style_from_dict(pygments_style_cls.styles)
def invalidation_hash(self):
pass
@classmethod
def from_defaults(cls, style_dict=None,
pygments_style_cls=pygments_DefaultStyle,
include_extensions=True):
" Deprecated. "
return style_from_pygments(
style_cls=pygments_style_cls,
style_dict=style_dict,
include_defaults=include_extensions)
|
tests/unit_test/test_java.py | Lufedi/reaper | 106 | 11131062 | import os
import unittest
from attributes.unit_test.discoverer import get_test_discoverer
from tests import get_lsloc, REPOS_PATH
class JavaTestDiscovererTestCase(unittest.TestCase):
def setUp(self):
self.discoverer = get_test_discoverer('Java')
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_discover(self):
# Test: Project using JUnit
path = os.path.join(REPOS_PATH, 'maven')
proportion = self.discoverer.discover(path)
self.assertLess(0, proportion)
# Test: Project with no unit tests (when these tests were written)
path = os.path.join(REPOS_PATH, 'MPAndroidChart')
proportion = self.discoverer.discover(path)
self.assertEqual(0, proportion)
        # Test: Project in Ruby to simulate a project with no Java source code
path = os.path.join(REPOS_PATH, 'squib')
proportion = self.discoverer.discover(path)
self.assertEqual(0, proportion)
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_junit(self):
# Test: Project using JUnit
path = os.path.join(REPOS_PATH, 'maven')
proportion = self.discoverer.__junit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertLess(0, proportion)
# Test: Project using JUnit without mention in pom.xml
path = os.path.join(REPOS_PATH, 'cassandra')
proportion = self.discoverer.__junit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertLess(0, proportion)
# Test: Project not using JUnit
path = os.path.join(REPOS_PATH, 'MPAndroidChart')
proportion = self.discoverer.__junit__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertEqual(0, proportion)
@unittest.skipIf(not os.path.exists(REPOS_PATH), 'setup.sh not run.')
def test_testng(self):
# Test: Project using TestNG
path = os.path.join(REPOS_PATH, 'SimianArmy')
proportion = self.discoverer.__testng__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertLess(0, proportion)
# Test: Project not using TestNG
path = os.path.join(REPOS_PATH, 'maven')
proportion = self.discoverer.__testng__(
path, get_lsloc(path, self.discoverer.languages)
)
self.assertEqual(0, proportion)
|
news/spiders/ip.py | GingerWWW/news_spider | 208 | 11131074 | # -*- coding: utf-8 -*-
import scrapy
class IpSpider(scrapy.Spider):
"""
    IP proxy test spider.
    Retries 3 times, with a 10-second timeout per attempt.
    Usage:
        change into the project directory
        $ scrapy crawl ip
"""
name = "ip"
allowed_domains = ["ip.cn"]
start_urls = (
'https://ip.cn',
)
custom_settings = dict(
COOKIES_ENABLED=True,
DEFAULT_REQUEST_HEADERS={
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:57.0) Gecko/20100101 Firefox/57.0'
},
USER_AGENT='Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:57.0) Gecko/20100101 Firefox/57.0',
DOWNLOADER_MIDDLEWARES={
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'news.middlewares.useragent.UserAgentMiddleware': 500,
            'news.middlewares.httpproxy.HttpProxyMiddleware': 720,  # proxy (cookies must stay associated with the proxy IP)
},
ITEM_PIPELINES={
'news.pipelines.store_mysql.StoreMysqlPipeline': 450,
},
DOWNLOAD_TIMEOUT=10
)
def parse(self, response):
info = response.xpath('//div[@class="well"]//code/text()').extract()
ip_info = dict(zip(['ip', 'address'], info))
yield ip_info
|
sumatra/dependency_finder/genesis.py | usnistgov/corr-sumatra | 143 | 11131079 | """
GENESIS-specific functions for finding information about dependencies.
Classes
-------
Dependency - contains information about a .g or .p file, and tries to determine
version information.
Functions
---------
find_version_from_versioncontrol() - determines whether a GENESIS file is
under version control, and if so, obtains
version information from this.
find_included_files() - finds all included .g files for a given GENESIS file.
find_dependencies() - returns a list of Dependency objects representing
all the files imported by a given GENESIS file.
Module variables
----------------
heuristics - a list of functions that will be called in sequence by
find_version()
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import with_statement
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
import re
import os
from sumatra.dependency_finder import core
class Dependency(core.BaseDependency):
"""
Contains information about a .g file, and tries to determine version information.
"""
module = 'genesis'
def __init__(self, name, path=None, version='unknown', diff='', source=None):
# name maybe should be path relative to main file?
super(Dependency, self).__init__(os.path.basename(name),
path or os.path.abspath(name),
version, diff, source)
def get_sim_path():
"""
Obtain the SIMPATH by parsing ~/.simrc
"""
# this is rather hacky, to say the least
with open(os.path.expanduser("~/.simrc")) as fd:
content = fd.read().replace("\\\n", "")
lines = [line[15:] for line in content.split("\n") if "setenv SIMPATH" in line]
if len(lines) > 1:
for i in range(1, len(lines)):
lines[i] = lines[i].replace("{getenv SIMPATH}", lines[0])
return lines[-1].split()
def find_included_files(file_path):
"""
Find all files that are included, whether directly or indirectly, by a given
.g file.
"""
    comment_pattern = re.compile(r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/')  # see http://ostermiller.org/findcomment.html
include_pattern = re.compile(r'include (?P<path>[\w\./]+)')
search_dirs = get_sim_path()
all_paths = []
def find(start_path, paths):
"""
Recursively look for files loaded by start_path, add them to paths.
"""
with open(start_path) as f:
without_comments = comment_pattern.sub("", f.read())
new_paths = include_pattern.findall(without_comments)
def add_ext(path):
if path[-2:] != ".g":
path += ".g"
return path
new_paths = (add_ext(p) for p in new_paths)
curdir = os.path.dirname(start_path)
new_paths = [core.find_file(p, curdir, search_dirs) for p in new_paths]
if new_paths:
print("%s loads the following:\n %s" % (start_path,
"\n ".join(new_paths)))
else:
print("%s loads no files" % start_path)
paths.extend(new_paths)
for path in new_paths:
find(path, paths)
find(file_path, all_paths)
return set(all_paths)
def find_dependencies(filename, executable):
"""
Return a list of Dependency objects representing all files included,
whether directly or indirectly, by a given .g file.
"""
heuristics = [core.find_versions_from_versioncontrol, ]
paths = find_included_files(filename)
# also need to find .p files
dependencies = [Dependency(name) for name in paths]
return core.find_versions(dependencies, heuristics)
|
data/recognition_dataset.py | BradyFU/DVG | 102 | 11131081 | import os
import copy
import math
import time
import random
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
def default_loader(path):
img = Image.open(path).convert('L')
return img
def default_list_reader(fileList):
imgList = []
with open(fileList, 'r') as file:
for line in file.readlines():
imgPath, label, domain = line.strip().split(' ')
imgList.append((imgPath, int(label), int(domain)))
return imgList
class ImageList(data.Dataset):
def __init__(self, root, fileList, list_reader=default_list_reader, loader=default_loader):
self.root = root
self.imgList = list_reader(fileList)
self.loader = loader
self.transform = transforms.Compose([
transforms.RandomCrop(128),
transforms.ToTensor(),
])
def __getitem__(self, index):
imgPath, target, domain = self.imgList[index]
img = self.loader(os.path.join(self.root, imgPath))
img = self.transform(img)
return {'img':img, 'label': target, 'domain_flag': domain}
def __len__(self):
return len(self.imgList)
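# Yields batches of dataset indices that mix paired real NIR/VIS samples with
# paired fake (generated) samples according to `ratio`; fake indices are
# offset by the number of real samples, matching SeparateImageList's layout.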
class SeparateBatchSampler(object):
def __init__(self, real_data_idx, fake_data_idx, batch_size=128, ratio=0.5, put_back=False):
self.batch_size = batch_size
self.ratio = ratio
self.real_data_num = len(real_data_idx)
self.fake_data_num = len(fake_data_idx)
self.max_num_image = max(self.real_data_num, self.fake_data_num)
self.real_data_idx = real_data_idx
self.fake_data_idx = fake_data_idx
self.processed_idx = copy.deepcopy(self.real_data_idx)
def __len__(self):
return self.max_num_image // (int(self.batch_size * self.ratio))
def __iter__(self):
batch_size_real_data = int(math.floor(self.ratio * self.batch_size))
batch_size_fake_data = self.batch_size - batch_size_real_data
self.processed_idx = copy.deepcopy(self.real_data_idx)
rand_real_data_idx = np.random.permutation(len(self.real_data_idx) // 2)
for i in range(self.__len__()):
batch = []
idx_fake_data = random.sample(self.fake_data_idx, batch_size_fake_data // 2)
for j in range(batch_size_real_data // 2):
idx = rand_real_data_idx[(i * batch_size_real_data + j) % (self.real_data_num // 2)]
batch.append(self.processed_idx[2 * idx])
batch.append(self.processed_idx[2 * idx + 1])
for idx in idx_fake_data:
batch.append(2 * idx + self.real_data_num)
batch.append(2 * idx + 1 + self.real_data_num)
yield batch
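# Dataset that concatenates real NIR/VIS image pairs with pre-generated fake
# pairs (under 'nir_noise/' and 'vis_noise/') and exposes both index ranges
# so SeparateBatchSampler can draw from them at a fixed ratio.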
class SeparateImageList(data.Dataset):
def __init__(self, real_data_path, real_list_path, fake_data_path, fake_total_num, ratio=0.5):
self.transform = transforms.Compose([
transforms.RandomCrop(128),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
# load real nir/vis data
real_data_list, real_data_idx = self.list_reader(real_data_path, real_list_path)
# load fake nir/vis data from noise
_idx = np.random.permutation(fake_total_num)
fake_data_list = []
fake_data_idx = []
for i in range(0, fake_total_num):
_fake_img_name = str(_idx[i] + 1) + '.jpg'
            # nir_noise/ and vis_noise/ are the directories holding the fake data
_fake_img_nir_name = 'nir_noise/' + _fake_img_name
_fake_img_vis_name = 'vis_noise/' + _fake_img_name
_fake_img_nir_path = os.path.join(fake_data_path, _fake_img_nir_name)
_fake_img_vis_path = os.path.join(fake_data_path, _fake_img_vis_name)
fake_data_list.append((_fake_img_nir_path, -1, 0))
fake_data_list.append((_fake_img_vis_path, -1, 1))
fake_data_idx.append(i)
self.real_data_idx = real_data_idx
self.fake_data_idx = fake_data_idx
real_data_list.extend(fake_data_list)
self.all_list = real_data_list
self.ratio = ratio
print('real: {}, fake: {}, total: {}, ratio: {}\n'.format(len(self.real_data_idx), len(self.fake_data_idx), len(self.all_list), self.ratio))
def get_idx(self):
return self.real_data_idx, self.fake_data_idx
def list_reader(self, root_path, fileList):
imgList = []
imgIdx = []
img_index = 0
with open(fileList, 'r') as file:
for line in file.readlines():
img_name, label, domain = line.strip().split(' ')
img_path = os.path.join(root_path, img_name)
imgList.append((img_path, int(label), int(domain)))
imgIdx.append(img_index)
img_index += 1
return imgList, imgIdx
def loader(self, path):
img = Image.open(path).convert('L')
return img
def __getitem__(self, index):
imgPath, target, domain = self.all_list[index]
img = self.loader(imgPath)
img = self.transform(img)
return {'img': img, 'label': target, 'domain_flag': domain}
def __len__(self):
return len(self.all_list)
|
googleanalytics/tests/__init__.py | ruber0id/google-analytics | 170 | 11131082 |
# encoding: utf-8
"""
These unit tests are somewhat limited in scope because they need
to work with any Google Analytics data. Therefore, we mainly test
for coherence and whether various functions return the proper
data structure, rather than whether the results are exactly
such or so.
Before you can run these tests, create a "sandbox" project at
https://console.developers.google.com/ and run `gash auth`
to authenticate against it. Your human-readable account name
should be `pyga-unittest`.
The account you're using for these unit tests should have
at least one Google Analytics domain set up.
"""
import googleanalytics as ga
import unittest
import datetime
from . import meta, query, report
|
backend/examples/migrations/0006_alter_example_upload_name.py | arcada-uas/doccano | 2,082 | 11131085 |
# Generated by Django 4.0.2 on 2022-04-05 02:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("examples", "0005_auto_20220405_0252"),
]
operations = [
migrations.AlterField(
model_name="example",
name="upload_name",
field=models.CharField(max_length=512),
),
]
|
test/intervals.py | davemarr621/interval_tree_1 | 488 | 11131124 | """
intervaltree: A mutable, self-balancing interval tree for Python 2 and 3.
Queries may be by point, by range overlap, or by range envelopment.
Test module: utilities to generate intervals
Copyright 2013-2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from intervaltree import Interval
from pprint import pprint
from random import randint, choice
from test.progress_bar import ProgressBar
import os
try:
xrange
except NameError:
xrange = range
try:
unicode
except NameError:
unicode = str
def make_iv(begin, end, label=False):
if label:
return Interval(begin, end, "[{0},{1})".format(begin, end))
else:
return Interval(begin, end)
def nogaps_rand(size=100, labels=False):
"""
Create a random list of Intervals with no gaps or overlaps
between the intervals.
:rtype: list of Intervals
"""
cur = -50
result = []
for i in xrange(size):
length = randint(1, 10)
result.append(make_iv(cur, cur + length, labels))
cur += length
return result
def gaps_rand(size=100, labels=False):
"""
Create a random list of intervals with random gaps, but no
overlaps between the intervals.
:rtype: list of Intervals
"""
cur = -50
result = []
for i in xrange(size):
length = randint(1, 10)
if choice([True, False]):
cur += length
length = randint(1, 10)
result.append(make_iv(cur, cur + length, labels))
cur += length
return result
def overlaps_nogaps_rand(size=100, labels=False):
l1 = nogaps_rand(size, labels)
l2 = nogaps_rand(size, labels)
result = set(l1) | set(l2)
return list(result)
def write_ivs_data(name, ivs, docstring='', imports=None):
"""
Write the provided ivs to test/name.py.
:param name: file name, minus the extension
:type name: str
:param ivs: an iterable of Intervals
    :type ivs: collections.abc.Iterable
:param docstring: a string to be inserted at the head of the file
:param imports: executable code to be inserted before data=...
"""
def trepr(s):
"""
Like repr, but triple-quoted. NOT perfect!
Taken from http://compgroups.net/comp.lang.python/re-triple-quoted-repr/1635367
"""
text = '\n'.join([repr(line)[1:-1] for line in s.split('\n')])
squotes, dquotes = "'''", '"""'
my_quotes, other_quotes = dquotes, squotes
if my_quotes in text:
if other_quotes in text:
escaped_quotes = 3*('\\' + other_quotes[0])
text = text.replace(other_quotes, escaped_quotes)
else:
my_quotes = other_quotes
return "%s%s%s" % (my_quotes, text, my_quotes)
data = [tuple(iv) for iv in ivs]
with open('test/data/{0}.py'.format(name), 'w') as f:
if docstring:
f.write(trepr(docstring))
f.write('\n')
if isinstance(imports, (str, unicode)):
f.write(imports)
f.write('\n\n')
elif isinstance(imports, (list, tuple, set)):
for line in imports:
f.write(line + '\n')
f.write('\n')
f.write('data = \\\n')
pprint(data, f)
if __name__ == '__main__':
    ivs = gaps_rand()
# write_ivs_data('ivs3', ivs, docstring="""
# Random integer ranges, with gaps.
# """
# )
pprint(ivs)
|
cifar_pipeline/configs/fixup/wideresnet/fixup_0_0_1/100_layers.py | PavelOstyakov/pipeline | 214 | 11131131 |
from ..base import ConfigWideResNetBase
class Config(ConfigWideResNetBase):
def __init__(self):
super().__init__(num_layers=100, fixup_coeff=0.01, normalization_type=ConfigWideResNetBase.FIXUP)
|
tests/test_kinesis/test_kinesis_cloudformation.py | gtourkas/moto | 5,460 | 11131142 |
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_kinesis, mock_cloudformation
@mock_cloudformation
def test_kinesis_cloudformation_create_stream():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = '{"Resources":{"MyStream":{"Type":"AWS::Kinesis::Stream"}}}'
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[
"StackResourceSummaries"
][0]
provisioned_resource["LogicalResourceId"].should.equal("MyStream")
len(provisioned_resource["PhysicalResourceId"]).should.be.greater_than(0)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_get_attr():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Outputs:
StreamName:
Value: !Ref TheStream
StreamArn:
Value: !GetAtt TheStream.Arn
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
output_stream_name = [
output["OutputValue"]
for output in stack_description["Outputs"]
if output["OutputKey"] == "StreamName"
][0]
output_stream_arn = [
output["OutputValue"]
for output in stack_description["Outputs"]
if output["OutputKey"] == "StreamArn"
][0]
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName=output_stream_name)[
"StreamDescription"
]
output_stream_arn.should.equal(stream_description["StreamARN"])
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_update():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
Name: MyStream
ShardCount: 4
RetentionPeriodHours: 48
Tags:
- Key: TagKey1
Value: TagValue1
- Key: TagKey2
Value: TagValue2
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
stack_description["StackName"].should.equal(stack_name)
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(48)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1")
tag2_value.should.equal("TagValue2")
shards_provisioned = len(
[
shard
for shard in stream_description["Shards"]
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
)
shards_provisioned.should.equal(4)
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
ShardCount: 6
RetentionPeriodHours: 24
Tags:
- Key: TagKey1
Value: TagValue1a
- Key: TagKey2
Value: TagValue2a
""".strip()
cf_conn.update_stack(StackName=stack_name, TemplateBody=template)
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(24)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1a")
tag2_value.should.equal("TagValue2a")
shards_provisioned = len(
[
shard
for shard in stream_description["Shards"]
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
)
shards_provisioned.should.equal(6)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_delete():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
Name: MyStream
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
stack_description["StackName"].should.equal(stack_name)
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["StreamName"].should.equal("MyStream")
cf_conn.delete_stack(StackName=stack_name)
streams = kinesis_conn.list_streams()["StreamNames"]
len(streams).should.equal(0)
|
dashboard/dashboard/pinpoint/models/change/repository_test.py | tingshao/catapult | 1,894 | 11131153 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.pinpoint.models.change import repository
from dashboard.pinpoint import test
class RepositoryTest(test.TestCase):
def testRepositoryUrl(self):
self.assertEqual(repository.RepositoryUrl('chromium'), test.CHROMIUM_URL)
def testRepositoryUrlRaisesWithUnknownName(self):
with self.assertRaises(KeyError):
repository.RepositoryUrl('not chromium')
def testRepository(self):
name = repository.RepositoryName(test.CHROMIUM_URL + '.git')
self.assertEqual(name, 'chromium')
def testRepositoryRaisesWithUnknownUrl(self):
with self.assertRaises(KeyError):
repository.RepositoryName('https://googlesource.com/nonexistent/repo')
def testAddRepository(self):
name = repository.RepositoryName(
'https://example/repo', add_if_missing=True)
self.assertEqual(name, 'repo')
self.assertEqual(repository.RepositoryUrl('repo'), 'https://example/repo')
self.assertEqual(repository.RepositoryName('https://example/repo'), 'repo')
def testAddRepositoryRaisesWithDuplicateName(self):
with self.assertRaises(AssertionError):
repository.RepositoryName('https://example/chromium', add_if_missing=True)
|
etl/parsers/etw/Microsoft_Windows_TPM_WMI.py | IMULMUL/etl-parser | 104 | 11131155 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-TPM-WMI
GUID : 7d5387b0-cbe0-11da-a94d-0800200c9a66
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=514, version=0)
class Microsoft_Windows_TPM_WMI_514_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=517, version=0)
class Microsoft_Windows_TPM_WMI_517_0(Etw):
pattern = Struct(
"HResult" / Int32sl
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=518, version=0)
class Microsoft_Windows_TPM_WMI_518_0(Etw):
pattern = Struct(
"HResult" / Int32sl
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=519, version=0)
class Microsoft_Windows_TPM_WMI_519_0(Etw):
pattern = Struct(
"ClearReason" / WString
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=769, version=0)
class Microsoft_Windows_TPM_WMI_769_0(Etw):
pattern = Struct(
"OldOSManagedAuthLevel" / Int32ul,
"NewOSManagedAuthLevel" / Int32ul
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=1026, version=0)
class Microsoft_Windows_TPM_WMI_1026_0(Etw):
pattern = Struct(
"ErrorCode" / Int32sl,
"StatusInformation" / Int32ul
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=1029, version=0)
class Microsoft_Windows_TPM_WMI_1029_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=1031, version=0)
class Microsoft_Windows_TPM_WMI_1031_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=1537, version=0)
class Microsoft_Windows_TPM_WMI_1537_0(Etw):
pattern = Struct(
"HealthAttestationServer" / WString
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=1538, version=0)
class Microsoft_Windows_TPM_WMI_1538_0(Etw):
pattern = Struct(
"HealthAttestationServer" / WString,
"HResult" / Int32sl
)
@declare(guid=guid("7d5387b0-cbe0-11da-a94d-0800200c9a66"), event_id=1539, version=0)
class Microsoft_Windows_TPM_WMI_1539_0(Etw):
pattern = Struct(
"HealthAttestationServer" / WString,
"HTTPStatus" / Int32sl,
"ServerResponse" / WString
)
|
benchmarks/heterogeneous_output_Lambdify.py | Midnighter/symengine.py | 133 | 11131182 |
#!/usr/bin/env python
import os
from time import perf_counter as clock  # time.clock was removed in Python 3.8
import numpy as np
import sympy as sp
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.sympy_parser import standard_transformations
import symengine as se
import warnings
src = os.path.join(os.path.dirname(__file__), '6_links_rhs.txt')
serial = open(src, 'tr').read()
parsed = parse_expr(serial, transformations=standard_transformations)
vec = sp.Matrix(1, 14, parsed)
args = tuple(sorted(vec.free_symbols, key=lambda arg: arg.name))
exprs = vec, vec.jacobian(args[:-14])
inp = np.ones(len(args))
assert inp.size == 26
lmb_sp = sp.lambdify(args, exprs, modules=['math', 'sympy'])
lmb_se = se.Lambdify(args, *exprs)
lmb_se_llvm = se.Lambdify(args, *exprs, backend='llvm')
lmb_sp(*inp)
tim_sympy = clock()
for i in range(500):
v, m = lmb_sp(*inp)
tim_sympy = clock() - tim_sympy
lmb_se(inp)
tim_se = clock()
for i in range(500):
v, m = lmb_se(inp)
tim_se = clock() - tim_se
lmb_se_llvm(inp)
tim_se_llvm = clock()
res_se_llvm = np.empty(len(exprs))
for i in range(500):
v, m = lmb_se_llvm(inp)
tim_se_llvm = clock() - tim_se_llvm
print('SymEngine (lambda double) speed-up factor (higher is better) vs sympy: %12.5g' %
(tim_sympy/tim_se))
print('symengine (LLVM) speed-up factor (higher is better) vs sympy: %12.5g' %
(tim_sympy/tim_se_llvm))
import itertools
from functools import reduce
from operator import mul
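# Work around heterogeneous outputs (vector + Jacobian): build a single flat
# LLVM-backed Lambdify over all outputs and reshape the flat result back into
# the original output shapes.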
def ManualLLVM(inputs, *outputs):
outputs_ravel = list(itertools.chain(*outputs))
cb = se.Lambdify(inputs, outputs_ravel, backend="llvm")
def func(*args):
result = []
n = np.empty(len(outputs_ravel))
t = cb.unsafe_real(np.concatenate([arg.ravel() for arg in args]), n)
start = 0
for output in outputs:
elems = reduce(mul, output.shape)
result.append(n[start:start+elems].reshape(output.shape))
start += elems
return result
return func
lmb_se_llvm_manual = ManualLLVM(args, *exprs)
lmb_se_llvm_manual(inp)
tim_se_llvm_manual = clock()
for i in range(500):
v, m = lmb_se_llvm_manual(inp)
tim_se_llvm_manual = clock() - tim_se_llvm_manual
print('symengine (ManualLLVM) speed-up factor (higher is better) vs sympy: %12.5g' %
(tim_sympy/tim_se_llvm_manual))
if tim_se_llvm_manual < tim_se_llvm:
warnings.warn("Cython code for Lambdify.__call__ is slow.")
|
dask_image/dispatch/_dispatch_ndmorph.py | martinschorb/dask-image | 144 | 11131202 | # -*- coding: utf-8 -*-
import numpy as np
import scipy.ndimage
from ._dispatcher import Dispatcher
__all__ = [
"dispatch_binary_dilation",
"dispatch_binary_erosion",
"dispatch_binary_structure",
]
dispatch_binary_dilation = Dispatcher(name="dispatch_binary_dilation")
dispatch_binary_erosion = Dispatcher(name="dispatch_binary_erosion")
dispatch_binary_structure = Dispatcher(name='dispatch_binary_structure')
# ================== binary_dilation ==================
@dispatch_binary_dilation.register(np.ndarray)
def numpy_binary_dilation(*args, **kwargs):
return scipy.ndimage.binary_dilation
@dispatch_binary_dilation.register_lazy("cupy")
def register_cupy_binary_dilation():
import cupy
import cupyx.scipy.ndimage
@dispatch_binary_dilation.register(cupy.ndarray)
def cupy_binary_dilation(*args, **kwargs):
return cupyx.scipy.ndimage.binary_dilation
# ================== binary_erosion ==================
@dispatch_binary_erosion.register(np.ndarray)
def numpy_binary_erosion(*args, **kwargs):
return scipy.ndimage.binary_erosion
@dispatch_binary_erosion.register_lazy("cupy")
def register_cupy_binary_erosion():
import cupy
import cupyx.scipy.ndimage
@dispatch_binary_erosion.register(cupy.ndarray)
def cupy_binary_erosion(*args, **kwargs):
return cupyx.scipy.ndimage.binary_erosion
# ================== generate_binary_structure ==================
@dispatch_binary_structure.register(np.ndarray)
def numpy_binary_structure(*args, **kwargs):
return scipy.ndimage.generate_binary_structure
@dispatch_binary_structure.register_lazy("cupy")
def register_cupy_binary_structure():
import cupy
import cupyx.scipy.ndimage
@dispatch_binary_structure.register(cupy.ndarray)
def cupy_binary_structure(*args, **kwargs):
return cupyx.scipy.ndimage.generate_binary_structure
|
src/utils/time_ext.py | HaoJiangGuo/fp-server | 173 | 11131206 | # -*- coding: utf-8 -*-
import datetime
import re
import time
ZERO_TIME_DELTA = datetime.timedelta(0)
LOCAL_TIME_DELTA = datetime.timedelta(hours=8)  # local timezone offset (UTC+8)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO_TIME_DELTA
def dst(self, dt):
return ZERO_TIME_DELTA
class LocalTimeZone(datetime.tzinfo):
def utcoffset(self, dt):
return LOCAL_TIME_DELTA
def dst(self, dt):
return ZERO_TIME_DELTA
def tzname(self, dt):
return '+08:00'
# singleton
UTC = UTC()
LocalTimeZone = LocalTimeZone()
date_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
datetime_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
def parse_date(value):
"""Parses a string(ISO_8601) and return a datetime.date.
Raises ValueError if the input is well formatted but not a valid date.
Returns None if the input isn't well formatted.
"""
match = date_re.match(value)
if match:
kw = {k: int(v) for k, v in match.groupdict().items()}
return datetime.date(**kw)
def parse_datetime(value):
"""Parses a string(ISO_8601) and return a datetime.datetime base UTC,
or parse datetime.datetime base other timezone and return a datetime.datetime base UTC timezone
"""
if isinstance(value, datetime.datetime):
if not value.tzinfo:
value = value.replace(tzinfo=LocalTimeZone)
return value.astimezone(UTC)
match = datetime_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = kw.pop('tzinfo')
tz = UTC
offset = 0
if tzinfo == 'Z':
offset = 0
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset = 60 * int(tzinfo[1:3]) + offset_mins
if tzinfo[0] == '-':
offset = -offset
else:
tz = LocalTimeZone
kw = {k: int(v) for k, v in kw.items() if v is not None}
kw['tzinfo'] = tz
dt = datetime.datetime(**kw)
        dt -= datetime.timedelta(minutes=offset)  # subtract the UTC offset so the result is in UTC
return dt.astimezone(UTC)
def convert_zone(dt: datetime.datetime, tz_to=UTC, tz_default=LocalTimeZone):
"""
@param dt:
    @param tz_to: target timezone of the conversion
    @param tz_default: default timezone used when dt has no tzinfo
"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz_default)
return dt.astimezone(tz_to)
def get_utc_time(dt: datetime.datetime = None, tz_default=LocalTimeZone):
"""
    @param dt if None, the current time is used
    @param tz_default default timezone used when dt has no tzinfo
"""
if dt is None:
dt = datetime.datetime.now()
return convert_zone(dt, tz_default=tz_default)
def get_time_str(dt: datetime.datetime = None, tz_default=LocalTimeZone):
"""
    @param dt if None, the current time is used
    @param tz_default default timezone used when dt has no tzinfo
"""
if not dt:
dt = datetime.datetime.now()
dt = convert_zone(dt, tz_default=tz_default)
time_str = dt.isoformat().split('+')[0]
return time_str + 'Z'
def get_date_str(dt: datetime.date = None):
"""
@param dt 为None时,返回当前日期
"""
if not dt:
dt = datetime.date.today()
return dt.isoformat()
def get_cur_timestamp():
""" 获取当前时间戳
"""
ts = int(time.time())
return ts
def get_cur_datetime_m():
""" 获取当前日期时间字符串,包含 年 + 月 + 日 + 时 + 分 + 秒 + 微妙
"""
today = datetime.datetime.today()
str_m = today.strftime('%Y%m%d%H%M%S%f')
return str_m
def get_datetime():
""" 获取日期时间字符串,包含 年 + 月 + 日 + 时 + 分 + 秒
"""
today = datetime.datetime.today()
str_dt = today.strftime('%Y%m%d%H%M%S')
return str_dt
def get_date(fmt='%Y%m%d', delta_day=0):
""" 获取日期字符串,包含 年 + 月 + 日
@param fmt 返回的日期格式
"""
day = datetime.datetime.today()
if delta_day:
day += datetime.timedelta(days=delta_day)
str_d = day.strftime(fmt)
return str_d
def date_str_to_dt(date_str=None, fmt='%Y%m%d', delta_day=0):
""" 日期字符串转换到datetime对象
@param date_str 日期字符串
@param fmt 日期字符串格式
@param delta_day 相对天数,<0减相对天数,>0加相对天数
"""
if not date_str:
dt = datetime.datetime.today()
else:
dt = datetime.datetime.strptime(date_str, fmt)
if delta_day:
dt += datetime.timedelta(days=delta_day)
return dt
def dt_to_date_str(dt=None, fmt='%Y%m%d', delta_day=0):
""" datetime对象转换到日期字符串
@param dt datetime对象
@param fmt 返回的日期字符串格式
@param delta_day 相对天数,<0减相对天数,>0加相对天数
"""
if not dt:
dt = datetime.datetime.today()
if delta_day:
dt += datetime.timedelta(days=delta_day)
str_d = dt.strftime(fmt)
return str_d
def ts_to_datetime_str(ts):
""" 将时间戳转换为日期时间格式,年-月-日 时:分:秒
@param ts 时间戳
"""
if not ts:
return '00-00-00 00:00:00'
dt = datetime.datetime.fromtimestamp(int(ts))
return dt.strftime('%Y-%m-%d %H:%M:%S')
def datetime_str_to_ts(dt_str, fmt='%Y-%m-%d %H:%M:%S'):
""" 将日期时间格式字符串转换成时间戳
@param dt_str 日期时间字符串
@param fmt 日期时间字符串格式
"""
ts = int(time.mktime(datetime.datetime.strptime(dt_str, fmt).timetuple()))
return ts
def current_timestamp(_int=True):
res = time.time()
if _int:
res = int(res)
return res
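# Minimal self-check sketch (illustrative only; added for clarity, not part of
# the original module):
if __name__ == '__main__':
    assert parse_date('2019-07-06') == datetime.date(2019, 7, 6)
    dt = parse_datetime('2019-07-06T12:00:00Z')
    assert dt.tzinfo is UTC and dt.hour == 12
    print(get_time_str(datetime.datetime(2019, 7, 6, 12, 0, 0)))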
|
CommonTools/ParticleFlow/python/ParticleSelectors/pfElectronsFromVertex_cfi.py | ckamtsikis/cmssw | 852 | 11131225 | import FWCore.ParameterSet.Config as cms
pfElectronsFromVertex = cms.EDFilter(
"IPCutPFCandidateSelector",
src = cms.InputTag("pfAllElectrons"), # PFCandidate source
vertices = cms.InputTag("offlinePrimaryVertices"), # vertices source
d0Cut = cms.double(0.2), # transverse IP
dzCut = cms.double(0.5), # longitudinal IP
dtCut = cms.double(-1.0), # time
d0SigCut = cms.double(99.), # transverse IP significance
dzSigCut = cms.double(99.), # longitudinal IP significance
dtSigCut = cms.double(-1.0), # time significance
)
|
gen2-social-distancing/distance.py | ibaiGorordo/depthai-experiments | 381 | 11131243 | import itertools
import logging
import math
import cv2
log = logging.getLogger(__name__)
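# Euclidean distance between two 3-D points, in the same units as the
# detections' depth coordinates.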
def calculate_distance(point1, point2):
x1, y1, z1 = point1
x2, y2, z2 = point2
dx, dy, dz = x1 - x2, y1 - y2, z1 - z2
distance = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
return distance
class DistanceGuardian:
max_distance = 1
def parse_frame(self, frame, detections):
results = []
for i, detection1 in enumerate(detections):
for detection2 in detections[i+1:]:
point1 = detection1['depth_x'], detection1['depth_y'], detection1['depth_z']
point2 = detection2['depth_x'], detection2['depth_y'], detection2['depth_z']
distance = calculate_distance(point1, point2)
log.info("DG: {}".format(distance))
results.append({
'distance': distance,
'dangerous': distance < self.max_distance,
'detection1': detection1,
'detection2': detection2,
})
return results
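# Same pairwise check as DistanceGuardian, but also draws ellipses, connecting
# lines and distance labels on the frame for visual debugging.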
class DistanceGuardianDebug(DistanceGuardian):
def parse_frame(self, frame, boxes):
results = super().parse_frame(frame, boxes)
overlay = frame.copy()
for result in results:
x1 = result['detection1']['x_min'] + (result['detection1']['x_max'] - result['detection1']['x_min']) // 2
y1 = result['detection1']['y_max']
x2 = result['detection2']['x_min'] + (result['detection2']['x_max'] - result['detection2']['x_min']) // 2
y2 = result['detection2']['y_max']
color = (0, 0, 255) if result['dangerous'] else (255, 0, 0)
cv2.ellipse(overlay, (x1, y1), (40, 10), 0, 0, 360, color, thickness=cv2.FILLED)
cv2.ellipse(overlay, (x2, y2), (40, 10), 0, 0, 360, color, thickness=cv2.FILLED)
cv2.line(overlay, (x1, y1), (x2, y2), color, 1)
label_size, baseline = cv2.getTextSize(str(round(result['distance'], 1)), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 1)
label_x = (x1 + x2 - label_size[0]) // 2
label_y = (y1 + y2 - label_size[1]) // 2
cv2.putText(overlay, str(round(result['distance'], 1)), (label_x, label_y), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color, 1)
frame[:] = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
return results
|
tester/test_model/test_post_hist.py | bukun/TorCMS | 243 | 11131257 | # -*- coding:utf-8 -*-
from torcms.core import tools
from torcms.model.post_hist_model import MPostHist
from torcms.model.post_model import MPost
class TestMPostHist():
def setup(self):
        print('setup runs before each test case in this class')
self.uid = ''
self.post_id = 'llk8'
def test_create_post_history(self):
self.tearDown()
p_d = {
'title': 'qqqii',
'cnt_md': 'qwqwqw',
'time_create': '1999',
'time_update': '2019',
'user_name': 'max',
'view_count': '1',
'logo': 'opps',
'memo': '',
'order': '1',
'kind': '1',
'valid': 1,
}
MPost().add_meta(self.post_id, p_d)
aa = MPost.get_by_uid(self.post_id)
tf = MPostHist.create_post_history(aa, aa)
assert tf
His = MPostHist.query_by_postid(self.post_id)
self.uid = His[0].uid
assert His[0].cnt_md == p_d['cnt_md']
self.tearDown()
def addHis(self, **kwargs):
p_d = {
'title': kwargs.get('title', 'iiiii'),
'cnt_md': kwargs.get('cnt_md', 'grgr'),
'time_create': kwargs.get('time_create', '1992'),
'time_update': kwargs.get('time_update', '1996070600'),
'user_name': kwargs.get('user_name', 'yuanyuan'),
'view_count': kwargs.get('view_count', 1),
'logo': kwargs.get('logo', 'prprprprpr'),
'memo': kwargs.get('memo', ''),
'order': kwargs.get('order', '1'),
'keywords': kwargs.get('keywords', ''),
'extinfo': kwargs.get('extinfo', {}),
'kind': kwargs.get('kind', '1'),
'valid': kwargs.get('valid', 1),
}
MPost().add_meta(self.post_id, p_d)
aa = MPost.get_by_uid(self.post_id)
MPostHist.create_post_history(aa, aa)
His = MPostHist.query_by_postid(self.post_id)
self.uid = His[0].uid
def test_get_by_uid(self):
p_t = {
'cnt_md': 'bbrreedd'
}
self.addHis(**p_t)
pp = MPostHist.get_by_uid(self.uid)
assert pp.cnt_md == p_t['cnt_md']
self.tearDown()
def test_update_cnt(self):
self.addHis()
post_data = {
'user_name': 'giser',
'cnt_md': 'gisersdfsdfsdf'
}
MPostHist.update_cnt(self.uid, post_data)
pp = MPostHist.get_by_uid(self.uid)
assert pp.cnt_md == post_data['cnt_md']
self.tearDown()
def test_query_by_postid(self):
p_t = {
'cnt_md': 'bbrreedd',
'user_name': 'ggggbabybaby'
}
self.addHis(**p_t)
aa = MPostHist.query_by_postid(self.post_id)
assert aa[0].cnt_md == p_t['cnt_md']
assert aa[0].user_name == p_t['user_name']
self.tearDown()
def test_get_last(self):
p_t = {
'cnt_md': 'bbrreedd',
'user_name': 'snow'
}
self.addHis(**p_t)
aa = MPostHist.get_last(self.post_id)
assert aa.user_name == p_t['user_name']
self.tearDown()
def test_delete(self):
aa = MPostHist.get_by_uid(self.uid)
assert aa == None
self.addHis()
aa = MPostHist.get_by_uid(self.uid)
assert aa.post_id == self.post_id
aa = MPostHist.delete(self.post_id)
assert aa == False
self.tearDown()
def tearDown(self):
print("function teardown")
tt = MPostHist.get_by_uid(self.uid)
if tt:
MPostHist.delete(tt.uid)
tt = MPost.get_by_uid(self.post_id)
if tt:
MPost.delete(tt.uid)
|
tools/point_cloud_vis.py | sisl/neat | 183 | 11131263 | import argparse
import numpy as np
import open3d as o3d
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, default='../carla_results/auto_pilot_v3_42/eval_routes_06_12_23_30_25/lidar_360/0000.npy', help='npy point cloud')
def main():
pcd_npy = np.load(args.file)
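    # The loaded array is expected to have shape (N, >=3); only the x, y, z columns are used below.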
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pcd_npy[:,0:3])
print(np.asarray(pcd.points))
o3d.visualization.draw_geometries([pcd])
if __name__ == '__main__':
global args
args = parser.parse_args()
main() |
questions/valid-parenthesis-string/Solution.py | marcus-aurelianus/leetcode-solutions | 141 | 11131265 | """
Given a string containing only three types of characters: '(', ')' and '*', write a function to check whether this string is valid. We define the validity of a string by these rules:
Any left parenthesis '(' must have a corresponding right parenthesis ')'.
Any right parenthesis ')' must have a corresponding left parenthesis '('.
Left parenthesis '(' must go before the corresponding right parenthesis ')'.
'*' could be treated as a single right parenthesis ')' or a single left parenthesis '(' or an empty string.
An empty string is also valid.
Example 1:
Input: "()"
Output: True
Example 2:
Input: "(*)"
Output: True
Example 3:
Input: "(*))"
Output: True
Note:
The string size will be in the range [1, 100].
"""
class Solution:
def checkValidString(self, s: str) -> bool:
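        # Sketch of the approach (added note): rg = [lo, hi] is the range of possible
        # counts of unmatched '(' so far, letting each '*' act as '(', ')' or ''.
        # If hi ever drops below 0 there are unmatchable ')'; if lo is still positive
        # after the whole pass there are unmatched '(' even in the best case. The
        # reversed second pass repeats the check with the roles of '(' and ')' swapped,
        # e.g. to reject "*(".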
rg = [0, 0]
for c in s:
if c == '(':
rg = [rg[0] + 1, rg[1] + 1]
elif c == ')':
rg = [rg[0] - 1, rg[1] - 1]
else:
rg = [rg[0] - 1, rg[1] + 1]
            if rg[1] < 0:
                return False
        if rg[0] > 0:
            return False
rg = [0, 0]
for c in reversed(s):
if c == ')':
rg = [rg[0] + 1, rg[1] + 1]
elif c == '(':
rg = [rg[0] - 1, rg[1] - 1]
else:
rg = [rg[0] - 1, rg[1] + 1]
            if rg[1] < 0:
                return False
        if rg[0] > 0:
            return False
return True |
EventFilter/L1GlobalTriggerRawToDigi/test/L1GtPacker_cfg.py | ckamtsikis/cmssw | 852 | 11131283 | from __future__ import print_function
#
# cfg file to pack a GT DAQ record
#
# V <NAME> 2009-04-06
import sys  # needed for sys.exit() in the error branches below
import FWCore.ParameterSet.Config as cms
# process
process = cms.Process('TestL1GtPacker')
###################### user choices ######################
# choose (pre)release
useRelease = 'CMSSW_3_1_0'
#useRelease = 'CMSSW_2_2_12'
# choose the type of sample used (True for RelVal, False for data)
useRelValSample = True
#useRelValSample=False
if useRelValSample == True :
# 2_2_X
#useGlobalTag = 'IDEAL_V12'
#useGlobalTag='STARTUP_V11'
# >= 3_1_0_pre11
useGlobalTag = 'MC_31X_V1'
#useGlobalTag = 'STARTUP31X_V1'
# RelVals
#useSample = 'RelValQCD_Pt_80_120'
useSample = 'RelValTTbar'
#useSample = 'RelValZTT'
else :
# < 3_1_0_pre11
#useGlobalTag = 'CRAFT_ALL_V11'
# >= 3_1_0_pre11
useGlobalTag = 'CRAFT0831X_V1'
#useGlobalTag = 'GR09_31X_V1P'
#useGlobalTag = 'GR09_31X_V1H'
# change to True to use local files
# the type of file should match the choice of useRelValSample and useGlobalTag
useLocalFiles = False
###################### end user choices ###################
# number of events to be processed and source file
process.maxEvents = cms.untracked.PSet(
input=cms.untracked.int32(10)
)
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
process.source = cms.Source ('PoolSource', fileNames=readFiles, secondaryFileNames=secFiles)
# type of sample used (True for RelVal, False for data)
if useRelValSample == True :
if useGlobalTag.count('IDEAL') or useGlobalTag.count('MC') :
if (useRelease == 'CMSSW_3_1_0') and (useSample == 'RelValQCD_Pt_80_120') :
dataset = cms.untracked.vstring('/RelValQCD_Pt_80_120/CMSSW_3_1_0-MC_31X_V1-v1/GEN-SIM-DIGI-RAW-HLTDEBUG')
readFiles.extend( [
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0002/CC0C544D-DF66-DE11-B3F7-0019DB29C620.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/F497B45F-6E66-DE11-BD38-000423D174FE.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/EAD57B4C-6866-DE11-B1F6-001D09F28C1E.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/CEA3E29A-6A66-DE11-AA09-001D09F24DA8.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/CE0E1F78-6D66-DE11-BA57-0019B9F72BFF.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/C6C7688D-6C66-DE11-9B55-001D09F24EC0.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/C41FF427-6E66-DE11-9E69-001D09F28F11.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/BA1F3621-6F66-DE11-8301-000423D992A4.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/AACEAEAF-6E66-DE11-8D81-001D09F23A6B.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/A6A671F1-6D66-DE11-BA00-001D09F25325.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/8A5DCC07-6C66-DE11-9E14-001D09F24448.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/669667C8-6E66-DE11-A989-001D09F25393.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/5EFC7C08-6666-DE11-B41E-0019B9F581C9.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/5CF56885-6F66-DE11-AD32-001D09F2543D.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/54A57152-6F66-DE11-A36F-001D09F290BF.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/2ADE643E-6D66-DE11-BE7A-001D09F24448.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/28483F30-6B66-DE11-B045-001617DBD224.root'
] );
elif (useRelease == 'CMSSW_2_2_12') and (useSample == 'RelValQCD_Pt_80_120') :
dataset = cms.untracked.vstring('/RelValQCD_Pt_80_120/CMSSW_2_2_10_IDEAL_V12_v1/GEN-SIM-DIGI-RAW-HLTDEBUG')
readFiles.extend( [
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/FC798D81-CB3D-DE11-9FBF-001D09F290BF.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/F80D1F07-CA3D-DE11-B1ED-001617DBCF6A.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/F63FDCEB-CA3D-DE11-A3AD-0019B9F72BFF.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/E6B3876B-CD3D-DE11-9E0D-000423D99F1E.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/CE04F0D4-CC3D-DE11-8465-001D09F244DE.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/C43129D0-CA3D-DE11-BA1C-001D09F2A49C.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/B2F78729-CD3D-DE11-9019-001D09F24448.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/9CDE8A75-CB3D-DE11-98B0-001D09F2A690.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/96F539A1-CB3D-DE11-9B6B-001D09F25217.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/94EB24CC-CB3D-DE11-A4D8-0019DB29C614.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/94919798-C83D-DE11-B793-001D09F24F65.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/90979075-CD3D-DE11-9D71-001D09F28F0C.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/6A2DE1D5-CA3D-DE11-B058-001D09F23A84.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/5EE96B31-CB3D-DE11-819A-001D09F25442.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/2AC7ADD9-C83D-DE11-B4FF-001D09F2447F.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/149A729C-CA3D-DE11-AA49-001617C3B5D8.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/142EB962-CA3D-DE11-AAFA-001617C3B6C6.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/10C82EA9-043E-DE11-B745-001D09F29524.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/08A81316-CC3D-DE11-8500-001D09F2546F.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/06C016EB-C93D-DE11-9A07-001D09F23C73.root'
] );
elif (useRelease == 'CMSSW_3_1_0') and (useSample == 'RelValTTbar') :
dataset = cms.untracked.vstring('/RelValTTbar/CMSSW_3_1_0-MC_31X_V1-v1/GEN-SIM-DIGI-RAW-HLTDEBUG')
readFiles.extend( [
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/F81AA535-C666-DE11-942A-001D09F24600.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/F45A3761-C766-DE11-8274-001D09F24FBA.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/ECDD6402-C466-DE11-AD8D-000423D99A8E.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/D0B6652D-C266-DE11-A7A6-001D09F24600.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/CA895E96-DE66-DE11-8768-001D09F248FD.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/B4FF6350-C466-DE11-BB33-001D09F24DA8.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/A80A52B0-C266-DE11-8A5A-001D09F25041.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/A6C8A82A-C266-DE11-8704-001D09F23A6B.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/A6350A56-C866-DE11-B573-001D09F24FBA.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/A4C58176-C566-DE11-ADC0-001D09F28D4A.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/A2C1AC27-C266-DE11-9667-001D09F2983F.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/8AE98AF2-C766-DE11-B315-001D09F26C5C.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/88F81419-C966-DE11-8481-001D09F24024.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/5EC0F22B-C266-DE11-A2DA-001D09F23A61.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/4A872C1B-C366-DE11-A844-001D09F25041.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/32CED660-C766-DE11-B873-001D09F28F11.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/308CF886-C866-DE11-95C7-001D09F28755.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/2A4FA6DE-C466-DE11-A598-000423D99E46.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/28327168-C666-DE11-9486-000423D99EEE.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/24FF0D62-CB66-DE11-8F1F-001D09F24DA8.root',
'/store/relval/CMSSW_3_1_0/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_31X_V1-v1/0001/14AEBDFE-C666-DE11-AA23-001D09F28755.root'
] );
elif (useRelease == 'CMSSW_2_2_12') and (useSample == 'RelValTTbar') :
dataset = cms.untracked.vstring('RelValTTbar/CMSSW_2_2_10_IDEAL_V12_v1/GEN-SIM-DIGI-RAW-HLTDEBUG')
readFiles.extend( [
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/FC0E794B-9F3D-DE11-8969-000423D6C8E6.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/DE8F4018-9E3D-DE11-993A-001D09F2423B.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/D88A54EB-9E3D-DE11-95AC-001617DBD230.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/D0D136FB-033E-DE11-A44E-001D09F28D4A.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/8E1C5431-9E3D-DE11-AF4B-001D09F28F11.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/86FC99FF-9D3D-DE11-92AF-001D09F290BF.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/7EAEEC8E-9E3D-DE11-8BC3-001D09F231C9.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/664A1CAD-9F3D-DE11-95D0-001D09F241B9.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/648417C1-9E3D-DE11-A52F-001D09F24682.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/58F61F49-9E3D-DE11-9B27-001D09F2523A.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/58117FD8-9E3D-DE11-8EEC-001617C3B778.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/3A53E076-9E3D-DE11-B98A-001D09F23A84.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/389C28A6-9E3D-DE11-843E-001D09F2447F.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0003/305A2B75-9E3D-DE11-BFAB-001D09F2423B.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/EE5B4533-933D-DE11-AD30-001D09F24DA8.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/EC88F7D0-9A3D-DE11-9836-001617E30E28.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/E2A462B0-9D3D-DE11-A2B6-001D09F244BB.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/CC9E91FC-933D-DE11-972F-001D09F25109.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/B845EA9A-9B3D-DE11-A9F9-001617C3B6FE.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/B67E5CE0-9D3D-DE11-83F1-001D09F291D2.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/92912A15-9D3D-DE11-B3C4-001D09F24448.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/749492B7-993D-DE11-9FBF-001617E30F50.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/706DA2E3-923D-DE11-97DA-001D09F241B4.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/6CDD71F8-973D-DE11-A993-001D09F297EF.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/6694F56B-9A3D-DE11-95EA-001D09F291D7.root',
'/store/relval/CMSSW_2_2_10/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V12_v1/0002/2255EDC6-9D3D-DE11-A02F-001D09F24D8A.root'
] );
else :
print('Error: no files for sample ', useSample, ', (pre)release ', useRelease, ' and global tag ', useGlobalTag, ' defined.')
sys.exit()
secFiles.extend([
])
elif useGlobalTag.count('STARTUP') :
if (useRelease == 'CMSSW_3_1_0') and (useSample == 'RelValQCD_Pt_80_120') :
dataset = cms.untracked.vstring('/RelValQCD_Pt_80_120/CMSSW_3_1_0-STARTUP31X_V1-v1/GEN-SIM-DIGI-RAW-HLTDEBUG')
readFiles.extend( [
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0002/2C209975-E066-DE11-9A95-001D09F27067.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/F8353FEB-3366-DE11-BB74-001D09F26509.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/F4E28000-3466-DE11-B243-000423D9997E.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/DCD49C28-3466-DE11-A772-001D09F251E0.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/CC34BA65-3466-DE11-B3EB-001D09F24D8A.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/A83F2050-3466-DE11-A56D-001D09F26509.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/A6983B60-3466-DE11-A04F-001D09F2983F.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/A03ACDE9-3166-DE11-B484-001D09F29619.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/9CFED7BA-3166-DE11-B8DA-001D09F2424A.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/761BDD0C-3466-DE11-8808-001D09F2983F.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/723AC396-3066-DE11-BA9F-001D09F27067.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/6CA51004-3466-DE11-BD6C-001D09F28F25.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/664957EA-3166-DE11-B7EA-001D09F2915A.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/549B80EA-3166-DE11-B467-000423D6CA6E.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/4E98BAB9-3166-DE11-9000-001D09F24EC0.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/3ED2AF60-3466-DE11-992C-001D09F28F25.root',
'/store/relval/CMSSW_3_1_0/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP31X_V1-v1/0001/2869A25E-3166-DE11-9634-001D09F28755.root'
] );
elif (useRelease == 'CMSSW_2_2_12') and (useSample == 'RelValQCD_Pt_80_120') :
dataset = cms.untracked.vstring('/RelValQCD_Pt_80_120/CMSSW_2_2_10_STARTUP_V11_v1/GEN-SIM-DIGI-RAW-HLTDEBUG')
readFiles.extend( [
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0003/1825DE62-043E-DE11-9AB7-001617C3B654.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/F2520292-863D-DE11-ACDF-001D09F2538E.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/F05F275A-893D-DE11-94B8-001D09F25217.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/D2855C5D-8A3D-DE11-8084-001D09F24D8A.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/AEAE2C43-893D-DE11-B3C0-001D09F24637.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/AE91E67A-873D-DE11-8BD6-001D09F297EF.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/AC1958A0-883D-DE11-96AE-001D09F29524.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/92706ABC-8B3D-DE11-A1E2-001D09F297EF.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/8EAE234C-8B3D-DE11-A87A-0019B9F72CC2.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/884EBAF1-873D-DE11-B5B7-001D09F291D2.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/68D12010-8C3D-DE11-BA50-001617C3B70E.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/68615FA2-8A3D-DE11-85AD-001D09F291D7.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/5C7DF99A-883D-DE11-BF37-001D09F2A465.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/52ED45CE-893D-DE11-A52E-001D09F24D4E.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/3AE45D80-863D-DE11-ADAA-00304879FA4A.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/164B8DC2-8B3D-DE11-A1BA-001617C3B654.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/14396117-8B3D-DE11-9A10-001D09F291D7.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/0EB74EF7-863D-DE11-AD5E-001D09F29597.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/026314A2-8A3D-DE11-8CB6-001D09F2447F.root',
'/store/relval/CMSSW_2_2_10/RelValQCD_Pt_80_120/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V11_v1/0002/00081940-863D-DE11-8158-0019B9F6C674.root'
] );
else :
print('Error: no files for sample ', useSample, ', (pre)release ', useRelease, ' and global tag ', useGlobalTag, ' defined.')
sys.exit()
secFiles.extend([
])
else :
print('Error: Global Tag ', useGlobalTag, ' not defined.')
sys.exit()
else :
# data
dataset = '/Cosmics/Commissioning09-v1/RAW'
print(' Running on set: '+ dataset)
readFiles.extend( [
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/00BD9A1F-B908-DE11-8B2C-000423D94A04.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/025E8B48-B608-DE11-A0EE-00161757BF42.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/027AA271-D208-DE11-9A7F-001617DBD5AC.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/04281D2F-D108-DE11-9A27-000423D944DC.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/065B0C1C-C008-DE11-A32B-001617E30F48.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/08B1054B-BD08-DE11-AF8B-001617C3B78C.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/0C055C33-D108-DE11-B678-001617C3B73A.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/0E480977-D208-DE11-BA78-001617C3B6E2.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/0E79251B-B908-DE11-83FF-000423D99CEE.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/101B8CA0-B508-DE11-B614-000423D99160.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/12C62C71-BF08-DE11-A48C-000423D99614.root',
'/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/16A77E08-B008-DE11-9121-000423D8F63C.root'
]);
secFiles.extend([
])
if useLocalFiles :
readFiles = 'file:/afs/cern.ch/user/g/ghete/scratch0/CmsswTestFiles/testGt_L1GtPacker_source.root'
# load and configure modules via Global Tag
# https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions
process.load('Configuration.StandardSequences.Geometry_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = useGlobalTag+'::All'
#process.load('L1Trigger.Configuration.L1Trigger_FakeConditions_cff')
# L1 GT/GMT packer
process.load("EventFilter.L1GlobalTriggerRawToDigi.l1GtPack_cfi")
# input tag for GT readout collection:
# input tag for GMT readout collection:
# source = hardware record
if useRelValSample == True :
daqGtInputTag = 'simGtDigis'
muGmtInputTag = 'simGmtDigis'
else :
daqGtInputTag = 'l1GtUnpack'
muGmtInputTag = 'l1GtUnpack'
process.l1GtPack.DaqGtInputTag = daqGtInputTag
process.l1GtPack.MuGmtInputTag = muGmtInputTag
# mask for active boards (actually 16 bits)
# if bit is zero, the corresponding board will not be packed
# default: no board masked: ActiveBoardsMask = 0xFFFF
# no board masked (default)
#process.l1GtPack.ActiveBoardsMask = 0xFFFF
# GTFE only in the record
#process.l1GtPack.ActiveBoardsMask = 0x0000
# GTFE + FDL
#process.l1GtPack.ActiveBoardsMask = 0x0001
# GTFE + GMT
#process.l1GtPack.ActiveBoardsMask = 0x0100
# GTFE + FDL + GMT
#process.l1GtPack.ActiveBoardsMask = 0x0101
# set it to verbose
process.l1GtPack.Verbosity = cms.untracked.int32(1)
# path to be run
if useRelValSample == True :
process.p = cms.Path(process.l1GtPack)
else :
process.p = cms.Path(process.l1GtPack) # FIXME unpack first raw data
# Message Logger
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.debugModules = ['l1GtPack']
process.MessageLogger.cerr.enable = False
process.MessageLogger.files.L1GtPacker = cms.untracked.PSet(
threshold=cms.untracked.string('DEBUG'),
#threshold = cms.untracked.string('INFO'),
#threshold = cms.untracked.string('ERROR'),
DEBUG=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
INFO=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
WARNING=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
ERROR=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
default = cms.untracked.PSet(
limit=cms.untracked.int32(-1)
)
)
# summary
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# output
process.outputL1GtPack = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('L1GtPacker.root'),
# keep only unpacked data in the ROOT file
outputCommands = cms.untracked.vstring('drop *',
'keep *_l1GtPack_*_*')
)
process.outpath = cms.EndPath(process.outputL1GtPack)
|
tests/test_constant.py | bibek22/einsteinpy | 485 | 11131285 | import astropy.units as u
import numpy as np
import pytest
from astropy import constants
from numpy.testing import assert_allclose
from einsteinpy.constant import Cosmo_Const, Cosmo_Const_base
def test_Cosmo_Const_returns_correct_value_units():
cnst = Cosmo_Const
assert cnst.value == 2.036e-35
assert isinstance(cnst.unit, u.core.CompositeUnit)
assert isinstance(cnst, u.quantity.Quantity)
def test_Cosmo_const_is_astropy_constant_with_unit_and_uncert():
cnst = Cosmo_Const_base
assert isinstance(cnst, constants.Constant)
assert cnst.uncertainty == 8.1e-40
assert cnst.unit == u.Unit("1 / s2")
def test_Cosmo_Const_has_correct_metadata():
cnst = Cosmo_Const_base
assert cnst.name == "Cosmological Constant"
assert cnst.system == "si" and cnst.abbrev == "lambda"
assert cnst.reference == "Wikipedia"
|
test/test_graph.py | tknuth/sortvis | 117 | 11131296 | import os.path
from libsortvis import graph, sortable, algos
import libpry
OUTDIR = "tmp"
class _GraphTest(libpry.AutoTree):
def setUpAll(self):
if not os.path.exists(OUTDIR):
os.mkdir(OUTDIR)
class uWeave(_GraphTest):
def test_lineCoords(self):
csource = graph.ColourGradient((1, 1, 1), (0, 0, 0))
p = graph.Weave(
csource, 100, 100, 20, graph.rgb("ffffff"), graph.rgb("000000"),
False, 6, 1
)
r = p.lineCoords([1, 2, 3, 4, 5], 5, 0.02)
assert r[-1] == (1, 1)
# Lead-in
assert r[0][1] == r[1][1]
assert r[0][0] != r[1][0]
# Lead-out
assert r[-1][1] == r[-2][1]
assert r[-1][0] != r[-2][0]
def test_draw(self):
csource = graph.ColourGradient((1, 1, 1), (0, 0, 0))
p = graph.Weave(
csource, 100, 100, 20,
graph.rgb("ffffff"),
graph.rgb("000000"),
False, 6, 1
)
        l = list(range(10))
l.reverse()
track = sortable.TrackList(l)
a = algos.insertionsort.insertionsort(track)
p.draw(track, "test", os.path.join(OUTDIR, "test_grayscale.png"))
class uDense(_GraphTest):
def test_draw(self):
csource = graph.ColourGradient((1, 1, 1), (0, 0, 0))
p = graph.Dense(csource, 20, graph.rgb("ffffff"), graph.rgb("000000"), False)
        l = list(range(8))
l.reverse()
track = sortable.TrackList(l)
a = algos.insertionsort.insertionsort(track)
p.draw(track, "test", os.path.join(OUTDIR, "test_weave.png"))
class uUtils(libpry.AutoTree):
def test_rgb(self):
assert graph.rgb((255, 255, 255)) == (1, 1, 1)
assert graph.rgb("ffffff") == (1, 1, 1)
assert graph.rgb("000000") == (0, 0, 0)
class uColourSource(libpry.AutoTree):
def test_gradient(self):
g = graph.ColourGradient((1, 1, 1), (0, 0, 0))
assert g.colour(0, 10) == (1.0, 1.0, 1.0)
assert g.colour(10, 10) == (0, 0, 0)
assert g.colour(5, 10) == (0.5, 0.5, 0.5)
g = graph.ColourGradient((0, 0, 0), (1, 1, 1))
assert g.colour(0, 10) == (0, 0, 0)
assert g.colour(10, 10) == (1.0, 1.0, 1.0)
assert g.colour(5, 10) == (0.5, 0.5, 0.5)
def test_hilbert(self):
g = graph.ColourHilbert()
assert g.colour(50, 200)
assert g.colour(50, 200)
tests = [
uWeave(),
uDense(),
uUtils(),
uColourSource()
]
|
tools/gdb/gdb_chrome.py | Scopetta197/chromium | 212 | 11131301 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GDB support for Chrome types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import gdb_chrome
This module relies on the WebKit gdb module already existing in
your Python path.
"""
import gdb
import webkit
class String16Printer(webkit.StringPrinter):
def to_string(self):
return webkit.ustring_to_string(self.val['_M_dataplus']['_M_p'])
class GURLPrinter(webkit.StringPrinter):
def to_string(self):
return self.val['spec_']
class FilePathPrinter(object):
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['path_']['_M_dataplus']['_M_p']
def lookup_function(val):
type_to_printer = {
'string16': String16Printer,
'GURL': GURLPrinter,
'FilePath': FilePathPrinter,
}
printer = type_to_printer.get(str(val.type), None)
if printer:
return printer(val)
return None
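# Extension sketch (illustrative, not from the original source): another Chrome type could
# be supported by writing a printer class modeled on the ones above and adding an entry
# such as the hypothetical 'base::Time': BaseTimePrinter to type_to_printer.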
gdb.pretty_printers.append(lookup_function)
|
Configuration/StandardSequences/python/RunsAndWeights.py | Purva-Chaudhari/cmssw | 852 | 11131328 | RunsAndWeights = {
'Run2012_AB_C_D_oneRunPerEra' : 'SimGeneral.Configuration.RunsAndWeights_Run2012_AB_C_D_oneRunPerEra',
'Run2018_ABCD' : 'SimGeneral.Configuration.RunsAndWeights_Run2018_ABCD',
'Run2018_Equal_Lumi_Integer_Weights' : 'SimGeneral.Configuration.RunsAndWeights_Run2018_Equal_Lumi_Integer_Weights'
}
|
src/cryptography/hazmat/primitives/kdf/concatkdf.py | ceridwen/cryptography | 9,953 | 11131343 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import struct
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.backends.interfaces import HashBackend
from cryptography.hazmat.primitives import constant_time, hashes, hmac
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
def _int_to_u32be(n):
return struct.pack('>I', n)
def _common_args_checks(algorithm, length, otherinfo):
max_length = algorithm.digest_size * (2 ** 32 - 1)
if length > max_length:
raise ValueError(
"Can not derive keys larger than {} bits.".format(
max_length
))
if otherinfo is not None:
utils._check_bytes("otherinfo", otherinfo)
def _concatkdf_derive(key_material, length, auxfn, otherinfo):
utils._check_byteslike("key_material", key_material)
output = [b""]
outlen = 0
counter = 1
while (length > outlen):
h = auxfn()
h.update(_int_to_u32be(counter))
h.update(key_material)
h.update(otherinfo)
output.append(h.finalize())
outlen += len(output[-1])
counter += 1
return b"".join(output)[:length]
@utils.register_interface(KeyDerivationFunction)
class ConcatKDFHash(object):
def __init__(self, algorithm, length, otherinfo, backend):
_common_args_checks(algorithm, length, otherinfo)
self._algorithm = algorithm
self._length = length
self._otherinfo = otherinfo
if self._otherinfo is None:
self._otherinfo = b""
if not isinstance(backend, HashBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HashBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
self._backend = backend
self._used = False
def _hash(self):
return hashes.Hash(self._algorithm, self._backend)
def derive(self, key_material):
if self._used:
raise AlreadyFinalized
self._used = True
return _concatkdf_derive(key_material, self._length,
self._hash, self._otherinfo)
def verify(self, key_material, expected_key):
if not constant_time.bytes_eq(self.derive(key_material), expected_key):
raise InvalidKey
@utils.register_interface(KeyDerivationFunction)
class ConcatKDFHMAC(object):
def __init__(self, algorithm, length, salt, otherinfo, backend):
_common_args_checks(algorithm, length, otherinfo)
self._algorithm = algorithm
self._length = length
self._otherinfo = otherinfo
if self._otherinfo is None:
self._otherinfo = b""
if salt is None:
salt = b"\x00" * algorithm.block_size
else:
utils._check_bytes("salt", salt)
self._salt = salt
if not isinstance(backend, HMACBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HMACBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
self._backend = backend
self._used = False
def _hmac(self):
return hmac.HMAC(self._salt, self._algorithm, self._backend)
def derive(self, key_material):
if self._used:
raise AlreadyFinalized
self._used = True
return _concatkdf_derive(key_material, self._length,
self._hmac, self._otherinfo)
def verify(self, key_material, expected_key):
if not constant_time.bytes_eq(self.derive(key_material), expected_key):
raise InvalidKey
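# Usage sketch (illustrative, mirroring the ConcatKDFHash constructor defined above;
# the key material and otherinfo values are made up):
#   from cryptography.hazmat.backends import default_backend
#   from cryptography.hazmat.primitives import hashes
#   ckdf = ConcatKDFHash(hashes.SHA256(), 32, otherinfo=b"context", backend=default_backend())
#   derived = ckdf.derive(b"input keying material")   # 32 bytes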
|
firefly/main.py | EdwardBetts/firefly | 207 | 11131345 | <reponame>EdwardBetts/firefly
import os
import sys
import argparse
import importlib
import yaml
import logging
from .app import Firefly
from .validator import ValidationError, FireflyError
from .version import __version__
from wsgiref.simple_server import make_server
logger = logging.getLogger("firefly")
def load_from_env():
functions = None
token = None
allow_origins = ''
if 'FIREFLY_FUNCTIONS' in os.environ:
function_names = os.environ['FIREFLY_FUNCTIONS'].split(",")
try:
functions = load_functions(function_names)
except (ImportError, AttributeError) as err:
sys.exit(1)
if 'FIREFLY_TOKEN' in os.environ:
token = os.environ['FIREFLY_TOKEN']
if 'FIREFLY_ALLOW_ORIGINS' in os.environ:
allow_origins = os.environ['FIREFLY_ALLOW_ORIGINS']
if 'FIREFLY_CONFIG' in os.environ:
logger.info("loading config file: %s", os.environ['FIREFLY_CONFIG'])
functions, token = parse_config_data(parse_config_file(os.environ['FIREFLY_CONFIG']))
if functions:
add_routes(app, functions)
if token:
app.set_auth_token(token)
if allow_origins:
app.set_allowed_origins(allow_origins)
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("--version", action="store_true", help="Prints the firefly version")
p.add_argument("-t", "--token", help="token to authenticate the requests")
p.add_argument("-b", "--bind", dest="ADDRESS", default="127.0.0.1:8000")
p.add_argument("-c", "--config", dest="config_file", default=None)
p.add_argument("--allow-origins", default=None, help="Origins to allow for cross-origin resource sharing")
p.add_argument("functions", nargs='*', help="functions to serve")
return p.parse_args()
def load_function(function_spec, path=None, name=None):
if "." not in function_spec:
raise Exception("Invalid function: {}, please specify it as module.function".format(function_spec))
mod_name, func_name = function_spec.rsplit(".", 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
except (ImportError, AttributeError) as err:
print("Failed to load {}: {}".format(function_spec, str(err)))
raise
path = path or "/"+func_name
name = name or func_name
return (path, name, func)
def load_functions(function_specs):
return [load_function(function_spec) for function_spec in function_specs]
def parse_config_file(config_file):
if not os.path.exists(config_file):
raise FireflyError("Specified config file does not exist.")
with open(config_file) as f:
config_dict = yaml.safe_load(f)
return config_dict
def parse_config_data(config_dict):
functions = [(load_function(f["function"], path=f.get("path"), name=name, ))
for name, f in config_dict["functions"].items()]
token = config_dict.get("token", None)
return functions, token
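# Example config file (illustrative values) in the shape parse_config_file/parse_config_data expect:
#
#   token: some-secret-token
#   functions:
#     square:
#       function: mymodule.square
#       path: /square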
def add_routes(app, functions):
for path, name, function in functions:
app.add_route(path, function, name)
def setup_logger():
level = logging.INFO
logging.basicConfig(
level=level,
format="%(asctime)s %(name)s [%(levelname)s] %(message)s",
datefmt='%Y-%m-%d %H:%M:%S')
def main():
# ensure current directory is added to sys.path
if "" not in sys.path:
sys.path.insert(0, "")
args = parse_args()
if args.version:
print("Firefly Version {}".format(__version__))
return
if (args.functions and args.config_file) or (not args.functions and not args.config_file):
raise FireflyError("Invalid arguments provided. Please specify either a config file or a list of functions.")
token = None
if len(args.functions):
functions = load_functions(args.functions)
elif args.config_file:
functions, token = parse_config_data(parse_config_file(args.config_file))
token = token or args.token
app.set_auth_token(token)
if args.allow_origins:
app.set_allowed_origins(args.allow_origins)
add_routes(app, functions)
host, port = args.ADDRESS.split(":", 1)
port = int(port)
print("http://{}/".format(args.ADDRESS))
server = make_server(host, port, app)
server.serve_forever()
setup_logger()
logger.info("Starting Firefly...")
app = Firefly()
load_from_env()
|
tests/pytorch_pfn_extras_tests/cuda_tests/test_allocator.py | yasuyuky/pytorch-pfn-extras | 243 | 11131349 | import pytest
import torch
import pytorch_pfn_extras as ppe
def test_stream():
cupy = pytest.importorskip('cupy')
assert 0 == cupy.cuda.get_current_stream().ptr
assert 0 == torch.cuda.current_stream().cuda_stream
# Use the default stream.
cupy.arange(10)
torch.arange(10)
# Use the custom stream.
stream = torch.cuda.Stream()
with ppe.cuda.stream(stream):
cupy.arange(10)
torch.arange(10)
assert cupy.cuda.get_current_stream().ptr == stream.cuda_stream
assert 0 == cupy.cuda.get_current_stream().ptr
assert 0 == torch.cuda.current_stream().cuda_stream
def test_stream_no_cupy():
stream = torch.cuda.Stream()
with ppe.cuda.stream(stream):
assert torch.cuda.current_stream().cuda_stream == stream.cuda_stream
def test_stream_none():
assert 0 == torch.cuda.current_stream().cuda_stream
with ppe.cuda.stream(None):
assert 0 == torch.cuda.current_stream().cuda_stream
class TestMemoryPool:
@pytest.fixture
def cupy(self):
cupy = pytest.importorskip('cupy')
mempool = cupy.get_default_memory_pool()
yield cupy
mempool.free_all_blocks()
cupy.cuda.set_allocator(mempool.malloc)
def test_use_default_mempool(self, cupy):
# disable mempool
mempool = cupy.get_default_memory_pool()
used_bytes = mempool.used_bytes()
cupy.cuda.set_allocator(None)
arr1 = cupy.zeros(10)
assert used_bytes == mempool.used_bytes()
ppe.cuda.use_default_mempool_in_cupy()
arr2 = cupy.zeros(10)
assert used_bytes < mempool.used_bytes()
del arr1
del arr2
def test_use_torch_mempool(self, cupy):
mempool = cupy.get_default_memory_pool()
used_bytes = mempool.used_bytes()
arr1 = cupy.zeros(10)
assert used_bytes < mempool.used_bytes()
used_bytes = mempool.used_bytes()
ppe.cuda.use_torch_mempool_in_cupy()
arr2 = cupy.zeros(10)
assert used_bytes == mempool.used_bytes()
del arr1
del arr2
def test_use_torch_mempool_stream(self, cupy):
ppe.cuda.use_torch_mempool_in_cupy()
stream = torch.cuda.Stream()
with ppe.cuda.stream(stream):
arr1 = torch.arange(10)
arr2 = cupy.arange(10)
assert (arr1.numpy() == arr2.get()).all()
del arr1
del arr2
def test_use_torch_mempool_stream_mismatch(self, cupy):
ppe.cuda.use_torch_mempool_in_cupy()
stream = cupy.cuda.Stream()
try:
stream.use()
with pytest.raises(
RuntimeError, match='pytorch_pfn_extras.cuda.stream'):
arr = cupy.arange(10)
del arr
finally:
cupy.cuda.Stream.null.use()
|
audio/tools.py | ishine/Comprehensive-Transformer-TTS | 147 | 11131356 | import torch
import numpy as np
from scipy.io.wavfile import write
from audio.audio_processing import griffin_lim
def get_mel_from_wav(audio, _stft):
audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)
audio = torch.autograd.Variable(audio, requires_grad=False)
melspec, energy = _stft.mel_spectrogram(audio)
melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)
energy = torch.squeeze(energy, 0).numpy().astype(np.float32)
return melspec, energy
def inv_mel_spec(mel, out_filename, _stft, griffin_iters=60):
mel = torch.stack([mel])
mel_decompress = _stft.spectral_de_normalize(mel)
mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
spec_from_mel_scaling = 1000
spec_from_mel = torch.mm(mel_decompress[0], _stft.mel_basis)
spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
spec_from_mel = spec_from_mel * spec_from_mel_scaling
audio = griffin_lim(
torch.autograd.Variable(spec_from_mel[:, :, :-1]), _stft._stft_fn, griffin_iters
)
audio = audio.squeeze()
audio = audio.cpu().numpy()
audio_path = out_filename
write(audio_path, _stft.sampling_rate, audio)
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
'''compute right padding (final frame) or both sides padding (first and final frames)
'''
assert pad_sides in (1, 2)
# return int(fsize // 2)
pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
if pad_sides == 1:
return 0, pad
else:
return pad // 2, pad // 2 + pad % 2
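# Quick check (illustrative numbers): for x of length 1000 and fshift=256,
# pad = (1000 // 256 + 1) * 256 - 1000 = 24, so pad_sides=1 gives (0, 24) and
# pad_sides=2 gives (12, 12). Note that fsize does not enter the computation.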
# Conversions
def amp_to_db(x):
return 20 * np.log10(np.maximum(1e-5, x))
def normalize(S, min_level_db):
return (S - min_level_db) / -min_level_db
|
1031 Maximum Sum of Two Non-Overlapping Subarrays.py | krishna13052001/LeetCode | 872 | 11131389 | <filename>1031 Maximum Sum of Two Non-Overlapping Subarrays.py<gh_stars>100-1000
#!/usr/bin/python3
"""
Given an array A of non-negative integers, return the maximum sum of elements in
two non-overlapping (contiguous) subarrays, which have lengths L and M. (For
clarification, the L-length subarray could occur before or after the M-length
subarray.)
Formally, return the largest V for which V = (A[i] + A[i+1] + ... + A[i+L-1]) +
(A[j] + A[j+1] + ... + A[j+M-1]) and either:
0 <= i < i + L - 1 < j < j + M - 1 < A.length, or
0 <= j < j + M - 1 < i < i + L - 1 < A.length.
Example 1:
Input: A = [0,6,5,2,2,5,1,9,4], L = 1, M = 2
Output: 20
Explanation: One choice of subarrays is [9] with length 1, and [6,5] with length
2.
Example 2:
Input: A = [3,8,1,3,2,1,8,9,0], L = 3, M = 2
Output: 29
Explanation: One choice of subarrays is [3,8,1] with length 3, and [8,9] with
length 2.
Example 3:
Input: A = [2,1,5,6,0,9,5,0,3,8], L = 4, M = 3
Output: 31
Explanation: One choice of subarrays is [5,6,0,9] with length 4, and [3,8] with
length 3.
Note:
L >= 1
M >= 1
L + M <= A.length <= 1000
0 <= A[i] <= 1000
"""
from typing import List
class Solution:
def maxSumTwoNoOverlap(self, A: List[int], L: int, M: int) -> int:
"""
Prefix sum + Brute force O(N^2)
two pointer i, j
"""
n = len(A)
F = [0 for _ in range(n + 1)]
for i, a in enumerate(A):
F[i+1] = F[i] + a
ret = -float("inf")
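        # Both orderings are tried so the L-length window may lie either before or
        # after the M-length window.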
for l, m in ((L, M), (M, L)):
for i in range(n + 1 - l):
for j in range(i + l, n + 1 - m): # upper needs +1 here
cur = F[i + l] - F[i] + F[j + m] - F[j]
ret = max(ret, cur)
return ret
if __name__ == "__main__":
assert Solution().maxSumTwoNoOverlap([0,6,5,2,2,5,1,9,4], 1, 2) == 20
|
lib/django-1.3/django/contrib/markup/templatetags/markup.py | MiCHiLU/google_appengine_sdk | 790 | 11131392 | <filename>lib/django-1.3/django/contrib/markup/templatetags/markup.py
"""
Set of "markup" template filters for Django. These filters transform plain text
markup syntaxes to HTML; currently there is support for:
* Textile, which requires the PyTextile library available at
http://loopcore.com/python-textile/
* Markdown, which requires the Python-markdown library from
http://www.freewisdom.org/projects/python-markdown
* reStructuredText, which requires docutils from http://docutils.sf.net/
"""
from django import template
from django.conf import settings
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
def textile(value):
try:
import textile
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in {% textile %} filter: The Python textile library isn't installed.")
return force_unicode(value)
else:
return mark_safe(force_unicode(textile.textile(smart_str(value), encoding='utf-8', output='utf-8')))
textile.is_safe = True
def markdown(value, arg=''):
"""
Runs Markdown over a given value, optionally using various
extensions python-markdown supports.
Syntax::
{{ value|markdown:"extension1_name,extension2_name..." }}
To enable safe mode, which strips raw HTML and only returns HTML
generated by actual Markdown syntax, pass "safe" as the first
extension in the list.
If the version of Markdown in use does not support extensions,
they will be silently ignored.
"""
try:
import markdown
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in {% markdown %} filter: The Python markdown library isn't installed.")
return force_unicode(value)
else:
# markdown.version was first added in 1.6b. The only version of markdown
# to fully support extensions before 1.6b was the shortlived 1.6a.
if hasattr(markdown, 'version'):
extensions = [e for e in arg.split(",") if e]
if len(extensions) > 0 and extensions[0] == "safe":
extensions = extensions[1:]
safe_mode = True
else:
safe_mode = False
# Unicode support only in markdown v1.7 or above. Version_info
# exist only in markdown v1.6.2rc-2 or above.
if getattr(markdown, "version_info", None) < (1,7):
return mark_safe(force_unicode(markdown.markdown(smart_str(value), extensions, safe_mode=safe_mode)))
else:
return mark_safe(markdown.markdown(force_unicode(value), extensions, safe_mode=safe_mode))
else:
return mark_safe(force_unicode(markdown.markdown(smart_str(value))))
markdown.is_safe = True
def restructuredtext(value):
try:
from docutils.core import publish_parts
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in {% restructuredtext %} filter: The Python docutils library isn't installed.")
return force_unicode(value)
else:
docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {})
parts = publish_parts(source=smart_str(value), writer_name="html4css1", settings_overrides=docutils_settings)
return mark_safe(force_unicode(parts["fragment"]))
restructuredtext.is_safe = True
register.filter(textile)
register.filter(markdown)
register.filter(restructuredtext)
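# Template usage sketch (illustrative): with 'django.contrib.markup' in INSTALLED_APPS,
# load the library and apply a filter in a template, e.g.
#   {% load markup %}
#   {{ value|markdown:"safe" }}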
|
cnstd/utils/metrics.py | breezedeus/cnstd | 266 | 11131409 | <reponame>breezedeus/cnstd
# coding: utf-8
# Copyright (C) 2021, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Credits: adapted from https://github.com/mindee/doctr
import numpy as np
import cv2
from typing import List, Tuple, Dict, Optional
from unidecode import unidecode
from scipy.optimize import linear_sum_assignment
from .geometry import rbbox_to_polygon, fit_rbbox
__all__ = [
'TextMatch',
'box_iou',
'box_ioa',
'mask_iou',
'rbox_to_mask',
'nms',
'LocalizationConfusion',
]
def string_match(word1: str, word2: str) -> Tuple[bool, bool, bool, bool]:
"""Perform string comparison with multiple levels of tolerance
Args:
word1: a string
word2: another string
Returns:
a tuple with booleans specifying respectively whether the raw strings, their lower-case counterparts, their
unidecode counterparts and their lower-case unidecode counterparts match
"""
raw_match = word1 == word2
caseless_match = word1.lower() == word2.lower()
unidecode_match = unidecode(word1) == unidecode(word2)
# Warning: the order is important here otherwise the pair ("EUR", "€") cannot be matched
unicase_match = unidecode(word1).lower() == unidecode(word2).lower()
return raw_match, caseless_match, unidecode_match, unicase_match
class TextMatch:
"""Implements text match metric (word-level accuracy) for recognition task.
The raw aggregated metric is computed as follows:
.. math::
\\forall X, Y \\in \\mathcal{W}^N,
TextMatch(X, Y) = \\frac{1}{N} \\sum\\limits_{i=1}^N f_{Y_i}(X_i)
with the indicator function :math:`f_{a}` defined as:
.. math::
\\forall a, x \\in \\mathcal{W},
f_a(x) = \\left\\{
\\begin{array}{ll}
1 & \\mbox{if } x = a \\\\
0 & \\mbox{otherwise.}
\\end{array}
\\right.
where :math:`\\mathcal{W}` is the set of all possible character sequences,
:math:`N` is a strictly positive integer.
Example::
>>> metric = TextMatch()
>>> metric.update(['Hello', 'world'], ['hello', 'world'])
>>> metric.summary()
"""
def __init__(self) -> None:
self.reset()
def update(self, gt: List[str], pred: List[str],) -> None:
"""Update the state of the metric with new predictions
Args:
            gt: list of ground-truth character sequences
pred: list of predicted character sequences"""
if len(gt) != len(pred):
raise AssertionError(
"prediction size does not match with ground-truth labels size"
)
for gt_word, pred_word in zip(gt, pred):
_raw, _caseless, _unidecode, _unicase = string_match(gt_word, pred_word)
self.raw += int(_raw)
self.caseless += int(_caseless)
self.unidecode += int(_unidecode)
self.unicase += int(_unicase)
self.total += len(gt)
def summary(self) -> Dict[str, float]:
"""Computes the aggregated metrics
Returns:
a dictionary with the exact match score for the raw data, its lower-case counterpart, its unidecode
counterpart and its lower-case unidecode counterpart
"""
if self.total == 0:
raise AssertionError(
"you need to update the metric before getting the summary"
)
return dict(
raw=self.raw / self.total,
caseless=self.caseless / self.total,
unidecode=self.unidecode / self.total,
unicase=self.unicase / self.total,
)
def reset(self) -> None:
self.raw = 0
self.caseless = 0
self.unidecode = 0
self.unicase = 0
self.total = 0
def box_iou(gt_boxes: np.ndarray, pred_boxes: np.ndarray) -> np.ndarray:
"""Compute the IoU between two sets of bounding boxes
Args:
gt_boxes: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)
pred_boxes: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)
Returns:
        a tuple of the IoU, precision and recall matrices, each of shape (N, M)
"""
num_gts, num_preds = gt_boxes.shape[0], pred_boxes.shape[0]
iou_mat = np.zeros((num_gts, num_preds), dtype=np.float32)
prec_mat = np.zeros((num_gts, num_preds), dtype=np.float32)
recall_mat = np.zeros((num_gts, num_preds), dtype=np.float32)
if gt_boxes.shape[0] > 0 and pred_boxes.shape[0] > 0:
l1, t1, r1, b1 = np.split(gt_boxes, 4, axis=1)
l2, t2, r2, b2 = np.split(pred_boxes, 4, axis=1)
left = np.maximum(l1, l2.T)
top = np.maximum(t1, t2.T)
right = np.minimum(r1, r2.T)
bot = np.minimum(b1, b2.T)
intersection = np.clip(right - left, 0, np.Inf) * np.clip(bot - top, 0, np.Inf)
union = (r1 - l1) * (b1 - t1) + ((r2 - l2) * (b2 - t2)).T - intersection
iou_mat = intersection / (union + 1e-6)
prec_mat = intersection / (np.zeros(num_gts) + ((r2 - l2) * (b2 - t2)).T + 1e-6)
recall_mat = intersection / ((r1 - l1) * (b1 - t1) + np.zeros(num_preds).T + 1e-6)
return iou_mat, prec_mat, recall_mat
def box_ioa(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:
"""Compute the IoA (intersection over area) between two sets of bounding boxes:
ioa(i, j) = inter(i, j) / area(i)
Args:
boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)
boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)
Returns:
the IoA matrix of shape (N, M)
"""
ioa_mat = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)
if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:
l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)
l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)
left = np.maximum(l1, l2.T)
top = np.maximum(t1, t2.T)
right = np.minimum(r1, r2.T)
bot = np.minimum(b1, b2.T)
intersection = np.clip(right - left, 0, np.Inf) * np.clip(bot - top, 0, np.Inf)
area = (r1 - l1) * (b1 - t1)
ioa_mat = intersection / area
return ioa_mat
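# Worked example (illustrative boxes): box_ioa(np.array([[0, 0, 2, 2]]), np.array([[1, 0, 3, 2]]))
# gives [[0.5]], since the intersection area 2 is divided by the area 4 of the first box.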
def mask_iou(
gt_masks: np.ndarray, pred_masks: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Compute the IoU between two sets of boolean masks
Args:
gt_masks: boolean masks of shape (N, H, W)
pred_masks: boolean masks of shape (N, H, W)
Returns:
the IoU vector of shape [N]
the precision vector of shape [N]
the recall vector of shape [N]
"""
if gt_masks.shape != pred_masks.shape:
raise AssertionError("both boolean masks should have the same spatial shape")
iou_vec = np.zeros((gt_masks.shape[0],), dtype=np.float32)
precision = np.zeros((gt_masks.shape[0],), dtype=np.float32)
recall = np.zeros((gt_masks.shape[0],), dtype=np.float32)
if gt_masks.shape[0] > 0 and pred_masks.shape[0] > 0:
intersection = np.logical_and(gt_masks, pred_masks).sum(axis=(1, 2))
union = np.logical_or(gt_masks, pred_masks).sum(axis=(1, 2))
prec_deno = pred_masks.sum(axis=(1, 2))
gt_deno = gt_masks.sum(axis=(1, 2))
iou_vec = intersection / (union + 1e-6)
precision = intersection / (prec_deno + 1e-6)
recall = intersection / (gt_deno + 1e-6)
return iou_vec, precision, recall
def rbox_to_mask(boxes_list: List[np.ndarray], shape: Tuple[int, int]) -> np.ndarray:
"""Convert boxes to masks
Args:
boxes_list: list of rotated bounding boxes of shape (M, 5) in format (x, y, w, h, alpha)
shape: spatial shapes of the output masks
Returns:
the boolean masks of shape (N, H, W)
"""
batch_size = len(boxes_list)
masks = np.zeros((batch_size, *shape), dtype=np.uint8)
for idx, boxes in enumerate(boxes_list):
if boxes.shape[0] > 0:
# Get absolute coordinates
if boxes.dtype != np.int:
abs_boxes = boxes.copy()
abs_boxes = abs_boxes.round().astype(np.int)
else:
abs_boxes = boxes
abs_boxes[:, 2:] = abs_boxes[:, 2:] + 1
# TODO: optimize slicing to improve vectorization
for _box in abs_boxes:
box = rbbox_to_polygon(_box)
cv2.fillPoly(masks[idx], [np.array(box, np.int32)], 1)
return masks.astype(bool)
def nms(boxes: np.ndarray, thresh: float = 0.5) -> List[int]:
"""Perform non-max suppression, borrowed from <https://github.com/rbgirshick/fast-rcnn>`_.
Args:
boxes: np array of straight boxes: (*, 5), (xmin, ymin, xmax, ymax, score)
thresh: iou threshold to perform box suppression.
Returns:
A list of box indexes to keep
"""
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
scores = boxes[:, 4]
areas = (x2 - x1) * (y2 - y1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
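# Hedged usage sketch (illustrative only, not part of the original module): boxes are
# (xmin, ymin, xmax, ymax, score) rows and the returned indices are sorted by decreasing score.
def _demo_nms() -> None:
    boxes = np.array([
        [0, 0, 10, 10, 0.9],
        [1, 1, 10, 10, 0.8],   # heavily overlaps the first box -> suppressed
        [20, 20, 30, 30, 0.7],
    ], dtype=np.float32)
    assert [int(i) for i in nms(boxes, thresh=0.5)] == [0, 2]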
class LocalizationConfusion:
"""Implements common confusion metrics and mean IoU for localization evaluation.
The aggregated metrics are computed as follows:
.. math::
\\forall Y \\in \\mathcal{B}^N, \\forall X \\in \\mathcal{B}^M, \\\\
Recall(X, Y) = \\frac{1}{N} \\sum\\limits_{i=1}^N g_{X}(Y_i) \\\\
Precision(X, Y) = \\frac{1}{M} \\sum\\limits_{i=1}^N g_{X}(Y_i) \\\\
meanIoU(X, Y) = \\frac{1}{M} \\sum\\limits_{i=1}^M \\max\\limits_{j \\in [1, N]} IoU(X_i, Y_j)
with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and
:math:`y`, and the function :math:`g_{X}` defined as:
.. math::
\\forall y \\in \\mathcal{B},
g_X(y) = \\left\\{
\\begin{array}{ll}
1 & \\mbox{if } y\\mbox{ has been assigned to any }(X_i)_i\\mbox{ with an }IoU \\geq 0.5 \\\\
0 & \\mbox{otherwise.}
\\end{array}
\\right.
where :math:`\\mathcal{B}` is the set of possible bounding boxes,
:math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.
    Args:
        iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match
        rotated_bbox: if True, ground truths are fitted with rotated boxes and both sets are rasterized to masks before comparison
        mask_shape: spatial shape of the rasterization masks used when ``rotated_bbox`` is True
    """
def __init__(
self,
iou_thresh: float = 0.5,
rotated_bbox: bool = False,
mask_shape: Tuple[int, int] = (1024, 1024),
) -> None:
self.iou_thresh = iou_thresh
self.rotated_bbox = rotated_bbox
self.mask_shape = mask_shape
self.reset()
def update(
self, gt_boxes: List[List[np.ndarray]], norm_preds: List[np.ndarray]
) -> Dict[str, float]:
"""
Args:
gt_boxes: 这里面的值是未归一化到 [0, 1] 的
norm_preds: 这里面的值是归一化到 [0, 1] 的
Returns:
"""
gts = self._transform_gt_polygons(gt_boxes)
preds = []
for n_pred in norm_preds:
pred = n_pred.copy()
pred[:, [0, 2]] *= self.mask_shape[1]
pred[:, [1, 3]] *= self.mask_shape[0]
preds.append(pred)
        cur_iou, cur_prec, cur_recall, cur_matches = 0.0, 0.0, 0.0, 0
batch_size = len(preds)
if batch_size > 0:
# Compute IoU
if self.rotated_bbox:
mask_gts = rbox_to_mask(gts, shape=self.mask_shape)
mask_preds = rbox_to_mask(preds, shape=self.mask_shape)
iou_vec, prec_vec, recall_vec = mask_iou(mask_gts, mask_preds)
cur_iou = iou_vec.sum()
cur_prec = prec_vec.sum()
cur_recall = recall_vec.sum()
cur_matches = int((iou_vec >= self.iou_thresh).sum())
else:
iou_mat, prec_mat, recall_mat = box_iou(np.concatenate(gts), np.concatenate(preds))
cur_iou = float(iou_mat.max(axis=1).sum())
cur_prec = float(prec_mat.max(axis=1).sum())
cur_recall = float(recall_mat.max(axis=1).sum())
# Assign pairs
gt_indices, pred_indices = linear_sum_assignment(-iou_mat)
cur_matches = int(
(iou_mat[gt_indices, pred_indices] >= self.iou_thresh).sum()
)
batch_res = {'iou': cur_iou, 'match': cur_matches, 'precision': cur_prec, 'recall': cur_recall}
cur_res = dict()
for name, val in batch_res.items():
self.total_res[name] += val
cur_res[name] = val / (1e-6 + batch_size)
# Update counts
self.num_gts += batch_size
return cur_res
    def _transform_gt_polygons(
        self, polygons: List[List[np.ndarray]]
    ) -> List[np.ndarray]:
        """
        Args:
            polygons: each innermost np.ndarray is a (4, 2) matrix holding the coordinates of the 4 corner points of one box
        Returns:
            list of rotated bounding boxes of shape (M, 5) in format (x, y, w, h, alpha) when ``rotated_bbox`` is True,
            otherwise straight bounding rectangles of shape (M, 4) in format (x, y, w, h)
        """
        out = []
        for boxes in polygons:
            new_boxes = []
            for box in boxes:
                # cv2.boundingRect (and, presumably, fit_rbbox) expects int32 or float32 points
                box = box.astype(np.int32)
new_boxes.append(
fit_rbbox(box) if self.rotated_bbox else cv2.boundingRect(box)
)
out.append(np.asarray(new_boxes))
return out
    def summary(self) -> Dict[str, float]:
        """Computes the aggregated metrics
        Returns:
            a dictionary with the averaged 'iou', 'match', 'precision' and 'recall' scores
        """
        num_gts = 1e-6 + self.num_gts
        out_res = {name: val / num_gts for name, val in self.total_res.items()}
        return out_res
def reset(self) -> None:
self.num_gts = 0
self.total_res = {'iou': 0.0, 'match': 0.0, 'precision': 0.0, 'recall': 0.0}
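# Hedged usage sketch (illustrative only, not part of the original module, with assumptions):
# ground truths are given as one list of (4, 2) corner arrays per image, in absolute pixel
# coordinates, while predictions are one normalized (M, 4) or (M, 5) array per image. Only the
# structure of the returned dictionaries is asserted, since the exact values depend on the
# matching and on helpers defined elsewhere in this module.
def _demo_localization_confusion() -> None:
    metric = LocalizationConfusion(iou_thresh=0.5, rotated_bbox=False, mask_shape=(1024, 1024))
    gt_boxes = [[np.array([[0, 0], [100, 0], [100, 100], [0, 100]])]]
    norm_preds = [np.array([[0.0, 0.0, 0.1, 0.1]], dtype=np.float32)]
    batch_res = metric.update(gt_boxes, norm_preds)
    summary = metric.summary()
    assert set(batch_res) == set(summary) == {'iou', 'match', 'precision', 'recall'}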
|
scale/queue/test/messages/test_requeue_jobs_bulk.py | kaydoh/scale | 121 | 11131436 | <reponame>kaydoh/scale
from __future__ import unicode_literals
from datetime import timedelta
import django
from django.test import TestCase
import batch.test.utils as batch_test_utils
import recipe.test.utils as recipe_test_utils
from error.test import utils as error_test_utils
from job.configuration.data.job_data import JobData
from job.test import utils as job_test_utils
from queue.messages.queued_jobs import QueuedJob
from queue.messages.requeue_jobs_bulk import RequeueJobsBulk
class TestRequeueJobsBulk(TestCase):
def setUp(self):
django.setup()
def test_json(self):
"""Tests coverting a RequeueJobsBulk message to and from JSON"""
sys_err = error_test_utils.create_error(category='SYSTEM')
data = JobData()
batch = batch_test_utils.create_batch()
recipe = recipe_test_utils.create_recipe()
job_type = job_test_utils.create_seed_job_type()
job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
input=data.get_dict())
job_1.batch_id = batch.id
job_1.recipe_id = recipe.id
job_1.save()
job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='CANCELED', error=sys_err,
input=data.get_dict())
# Create message
message = RequeueJobsBulk()
message.started = job_1.last_modified - timedelta(seconds=1)
message.ended = job_1.last_modified + timedelta(seconds=1)
message.error_categories = ['SYSTEM']
message.error_ids = [sys_err.id]
message.job_ids = [job_1.id]
message.job_type_ids = [job_type.id]
message.priority = 1
message.status = 'FAILED'
message.job_type_names = [job_type.name]
message.batch_ids = [batch.id]
message.recipe_ids = [recipe.id]
message.is_superseded = False
# Convert message to JSON and back, and then execute
message_json_dict = message.to_json()
new_message = RequeueJobsBulk.from_json(message_json_dict)
result = new_message.execute()
self.assertTrue(result)
# Should be one re-queue message for job 1
self.assertEqual(len(new_message.new_messages), 1)
message = new_message.new_messages[0]
self.assertEqual(message.type, 'requeue_jobs')
self.assertListEqual(message._requeue_jobs, [QueuedJob(job_1.id, job_1.num_exes)])
self.assertEqual(message.priority, 1)
def test_execute(self):
"""Tests calling RequeueJobsBulk.execute() successfully"""
# Importing module here to patch the max batch size
import queue.messages.requeue_jobs_bulk
queue.messages.requeue_jobs_bulk.MAX_BATCH_SIZE = 5
sys_err = error_test_utils.create_error(category='SYSTEM')
data = JobData()
job_type = job_test_utils.create_seed_job_type()
job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
input=data.get_dict())
job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
input=data.get_dict())
job_3 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='FAILED', error=sys_err)
job_4 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
input=data.get_dict())
job_5 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='CANCELED', error=sys_err,
input=data.get_dict())
job_6 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
input=data.get_dict())
job_7 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', error=sys_err,
input=data.get_dict())
# Create message
message = queue.messages.requeue_jobs_bulk.RequeueJobsBulk()
message.error_ids = [sys_err.id]
message.job_type_ids = [job_type.id]
message.priority = 10001
message.status = 'FAILED'
# Execute message
result = message.execute()
self.assertTrue(result)
# Should be two messages, one for next bulk re-queue and one for re-queuing the specific jobs
self.assertEqual(len(message.new_messages), 2)
requeue_bulk_message = message.new_messages[0]
requeue_message = message.new_messages[1]
self.assertEqual(requeue_bulk_message.type, 'requeue_jobs_bulk')
self.assertEqual(requeue_bulk_message.current_job_id, job_2.id)
self.assertEqual(requeue_message.type, 'requeue_jobs')
# Job 5 is skipped due to CANCELED and job 3 has not been queued yet (forced illegal state)
self.assertListEqual(requeue_message._requeue_jobs, [QueuedJob(job_7.id, job_7.num_exes),
QueuedJob(job_6.id, job_6.num_exes),
QueuedJob(job_4.id, job_4.num_exes),
QueuedJob(job_2.id, job_2.num_exes)])
self.assertEqual(requeue_message.priority, 10001)
# Test executing message again
message.new_messages = []
result = message.execute()
self.assertTrue(result)
# Should have same messages returned
self.assertEqual(len(message.new_messages), 2)
requeue_bulk_message = message.new_messages[0]
requeue_message = message.new_messages[1]
self.assertEqual(requeue_bulk_message.type, 'requeue_jobs_bulk')
self.assertEqual(requeue_bulk_message.current_job_id, job_2.id)
self.assertEqual(requeue_message.type, 'requeue_jobs')
# Job 5 is skipped due to CANCELED and job 3 has not been queued yet (forced illegal state)
self.assertListEqual(requeue_message._requeue_jobs, [QueuedJob(job_7.id, job_7.num_exes),
QueuedJob(job_6.id, job_6.num_exes),
QueuedJob(job_4.id, job_4.num_exes),
QueuedJob(job_2.id, job_2.num_exes)])
self.assertEqual(requeue_message.priority, 10001)
def test_execute_canceled(self):
"""Tests calling RequeueJobsBulk.execute() successfully to requeue canceled jobs"""
data = JobData()
job_type = job_test_utils.create_seed_job_type()
job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='CANCELED', input=data.get_dict())
job_2 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='CANCELED')
# Create message
message = RequeueJobsBulk()
message.job_type_ids = [job_type.id]
message.priority = 10001
# Execute message
result = message.execute()
self.assertTrue(result)
# Should be one message for re-queuing both jobs
self.assertEqual(len(message.new_messages), 1)
requeue_message = message.new_messages[0]
self.assertEqual(requeue_message.type, 'requeue_jobs')
self.assertListEqual(requeue_message._requeue_jobs, [QueuedJob(job_2.id, job_2.num_exes),
QueuedJob(job_1.id, job_1.num_exes)])
self.assertEqual(requeue_message.priority, 10001)
# Test executing message again
message.new_messages = []
result = message.execute()
self.assertTrue(result)
# Should have same message returned
self.assertEqual(len(message.new_messages), 1)
requeue_message = message.new_messages[0]
self.assertEqual(requeue_message.type, 'requeue_jobs')
self.assertListEqual(requeue_message._requeue_jobs, [QueuedJob(job_2.id, job_2.num_exes),
QueuedJob(job_1.id, job_1.num_exes)])
self.assertEqual(requeue_message.priority, 10001)
|