max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
src/generate-jobs/calculate_quad_key.py | geops/osm2vectortiles | 524 | 12688885 | #!/usr/bin/env python
"""Calculate QuadKey for TSV file and append it as column
Usage:
calculate_quad_key.py <list_file>
calculate_quad_key.py (-h | --help)
calculate_quad_key.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
import sys
import csv
from docopt import docopt
def quad_tree(tx, ty, zoom):
"""
Converts XYZ tile coordinates to Microsoft QuadTree
http://www.maptiler.org/google-maps-coordinates-tile-bounds-projection/
"""
quad_key = ''
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quad_key += str(digit)
return quad_key
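# Illustrative check (not part of the original script): at zoom level 3, tile x=3, y=5
# interleaves bit-by-bit into the Microsoft QuadKey '213', i.e. quad_tree(3, 5, 3) == '213'.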
if __name__ == '__main__':
args = docopt(__doc__, version='0.1')
writer = csv.writer(sys.stdout, delimiter=' ')
with open(args['<list_file>'], "r") as file_handle:
for line in file_handle:
z, x, y = line.split('/')
writer.writerow([
line.strip(),
quad_tree(int(x), int(y), int(z))]
)
|
conans/test/functional/generators/package_info/deps_cpp_info_test.py | ssaavedra/conan | 6,205 | 12688890 | import textwrap
import unittest
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
class DepsCppInfoTest(unittest.TestCase):
def test(self):
# https://github.com/conan-io/conan/issues/7598
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . dep/0.1@user/testing")
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
requires = "dep/0.1@user/testing"
def build(self):
self.output.info("DEPS_CPP_INFO_BIN: %s" % self.deps_cpp_info["dep"].bin_paths)
""")
client.save({"conanfile.py": conanfile})
client.run("create . pkg/0.1@user/testing")
self.assertIn("pkg/0.1@user/testing: DEPS_CPP_INFO_BIN: []", client.out)
client.run("install .")
client.run("build .")
self.assertIn("conanfile.py: DEPS_CPP_INFO_BIN: []", client.out)
|
colossalai/nn/optimizer/hybrid_adam.py | RichardoLuo/ColossalAI | 1,630 | 12688891 |
import torch
from colossalai.utils import multi_tensor_applier
from colossalai.registry import OPTIMIZERS
from colossalai.nn.optimizer import CPU_ADAM_CNT
@OPTIMIZERS.register_module
class HybridAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Supports parameter updates on both GPU and CPU, depending on the device of the parameters.
But the parameters and gradients should be on the same device:
* Parameters on CPU and gradients on CPU is allowed.
* Parameters on GPU and gradients on GPU is allowed.
* Parameters on GPU and gradients on CPU is **not** allowed.
Requires ColossalAI to be installed via ``pip install .``
This version of Hybrid Adam is a hybrid of CPUAdam and FusedAdam.
* For parameters updated on CPU, it uses CPUAdam.
* For parameters updated on GPU, it uses FusedAdam.
* Hybrid precision calculation of fp16 and fp32 is supported, e.g. fp32 parameters and fp16 gradients.
:class:`colossalai.nn.optimizer.HybridAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adamw_mode=False``
Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
model_params (iterable): iterable of parameters of dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED yet in CPUAdam!
adamw_mode (boolean, optional): Apply L2 regularization or weight decay
True for decoupled weight decay (also known as AdamW) (default: True)
simd_log (boolean, optional): whether to show if you are using SIMD to
accelerate. (default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
# Number of fp32 shards per parameter:
# Param weight, grad, momentum and variance
num_fp32_shards_per_param = 4
def __init__(self,
model_params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
adamw_mode=True,
simd_log=False):
default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)
super(HybridAdam, self).__init__(model_params, default_args)
self.opt_id = CPU_ADAM_CNT()
self.adamw_mode = adamw_mode
try:
import cpu_adam
import colossal_C
except ImportError:
raise ImportError('Please install colossalai from source code to use HybridAdam')
self.cpu_adam_op = cpu_adam
self.cpu_adam_op.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode, simd_log)
self.gpu_adam_op = colossal_C.multi_tensor_adam
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
def __del__(self):
if self.cpu_adam_op:
self.cpu_adam_op.destroy_adam(self.opt_id)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for _, group in enumerate(self.param_groups):
g_l, p_l, m_l, v_l = [], [], [], []
group_step = 0
for _, p in enumerate(group['params']):
if p.grad is None:
continue
state = self.state[p]
target_device = p.device
if len(state) == 0:
state['step'] = 0
# gradient momentums
state['exp_avg'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)
state['step'] += 1
group_step = state['step']
beta1, beta2 = group['betas']
if target_device.type == 'cpu':
assert state['exp_avg'].device.type == 'cpu', "exp_avg should stay on cpu"
assert state['exp_avg_sq'].device.type == 'cpu', "exp_avg_sq should stay on cpu"
self.cpu_adam_op.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
state['exp_avg'], state['exp_avg_sq'], -1)
elif target_device.type == 'cuda':
assert state['exp_avg'].device.type == 'cuda', "exp_avg should stay on cuda"
assert state['exp_avg_sq'].device.type == 'cuda', "exp_avg_sq should stay on cuda"
# record the state by group and update all at once
g_l.append(p.grad.data)
p_l.append(p.data)
m_l.append(state['exp_avg'])
v_l.append(state['exp_avg_sq'])
else:
raise RuntimeError
if len(g_l) > 0:
adamw_mode = 1 if self.adamw_mode else 0
bias_correction = 1 if group['bias_correction'] else 0
multi_tensor_applier(self.gpu_adam_op, self._dummy_overflow_buf, [g_l, p_l, m_l, v_l], group['lr'],
group['betas'][0], group['betas'][1], group['eps'], group_step, adamw_mode,
bias_correction, group['weight_decay'])
return loss
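# Illustrative usage sketch (not part of the original module). It assumes a CUDA build of
# ColossalAI with its cpu_adam and colossal_C extensions installed, plus a toy model:
#   model = torch.nn.Linear(16, 4).cuda()
#   optimizer = HybridAdam(model.parameters(), lr=1e-3, weight_decay=1e-2)
#   loss = model(torch.randn(8, 16, device='cuda')).sum()
#   loss.backward()
#   optimizer.step()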
|
cctbx/adp_restraints/flags.py | dperl-sol/cctbx_project | 155 | 12688892 | from __future__ import absolute_import, division, print_function
from libtbx import adopt_init_args
import sys
class flags(object):
def __init__(self,
adp_similarity=None,
rigid_bond=None,
isotropic_adp=None,
default=False):
if (adp_similarity is None): adp_similarity = default
if (rigid_bond is None): rigid_bond = default
if (isotropic_adp is None): isotropic_adp = default
adopt_init_args(self, locals())
def show(self, f=None):
if (f is None): f = sys.stdout
print("adp_restraints.manager.flags:", file=f)
print(" adp_similarity:", self.adp_similarity, file=f)
print(" rigid_bond:", self.rigid_bond, file=f)
print(" isotropic_adp:", self.isotropic_adp, file=f)
|
tutorial/python/1-Flat.py | ScriptBox99/facebook-faiss | 17,006 | 12688897 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
d = 64 # dimension
nb = 100000 # database size
nq = 10000 # nb of queries
np.random.seed(1234) # make reproducible
xb = np.random.random((nb, d)).astype('float32')
xb[:, 0] += np.arange(nb) / 1000.
xq = np.random.random((nq, d)).astype('float32')
xq[:, 0] += np.arange(nq) / 1000.
import faiss # make faiss available
index = faiss.IndexFlatL2(d) # build the index
print(index.is_trained)
index.add(xb) # add vectors to the index
print(index.ntotal)
k = 4 # we want to see 4 nearest neighbors
D, I = index.search(xb[:5], k) # sanity check
print(I)
print(D)
D, I = index.search(xq, k) # actual search
print(I[:5]) # neighbors of the 5 first queries
print(I[-5:]) # neighbors of the 5 last queries
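# Expected behaviour of the sanity check above (a property of exact L2 search, not extra
# output from the original tutorial): each of the first 5 database vectors is its own
# nearest neighbour, so the first column of I is [0, 1, 2, 3, 4] and the first column of D is 0.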
|
geoplot/datasets.py | QuLogic/geoplot | 1,025 | 12688898 |
"""
Example dataset fetching utility. Used in docs.
"""
src = 'https://raw.githubusercontent.com/ResidentMario/geoplot-data/master'
def get_path(dataset_name):
"""
Returns the URL path to an example dataset suitable for reading into ``geopandas``.
"""
if dataset_name == 'usa_cities':
return f'{src}/usa-cities.geojson'
elif dataset_name == 'contiguous_usa':
return f'{src}/contiguous-usa.geojson'
elif dataset_name == 'nyc_collision_factors':
return f'{src}/nyc-collision-factors.geojson'
elif dataset_name == 'nyc_boroughs':
return f'{src}/nyc-boroughs.geojson'
elif dataset_name == 'ny_census':
return f'{src}/ny-census-partial.geojson'
elif dataset_name == 'obesity_by_state':
return f'{src}/obesity-by-state.tsv'
elif dataset_name == 'la_flights':
return f'{src}/la-flights.geojson'
elif dataset_name == 'dc_roads':
return f'{src}/dc-roads.geojson'
elif dataset_name == 'nyc_map_pluto_sample':
return f'{src}/nyc-map-pluto-sample.geojson'
elif dataset_name == 'nyc_collisions_sample':
return f'{src}/nyc-collisions-sample.csv'
elif dataset_name == 'boston_zip_codes':
return f'{src}/boston-zip-codes.geojson'
elif dataset_name == 'boston_airbnb_listings':
return f'{src}/boston-airbnb-listings.geojson'
elif dataset_name == 'napoleon_troop_movements':
return f'{src}/napoleon-troop-movements.geojson'
elif dataset_name == 'nyc_fatal_collisions':
return f'{src}/nyc-fatal-collisions.geojson'
elif dataset_name == 'nyc_injurious_collisions':
return f'{src}/nyc-injurious-collisions.geojson'
elif dataset_name == 'nyc_police_precincts':
return f'{src}/nyc-police-precincts.geojson'
elif dataset_name == 'nyc_parking_tickets':
return f'{src}/nyc-parking-tickets-sample.geojson'
elif dataset_name == 'world':
return f'{src}/world.geojson'
elif dataset_name == 'melbourne':
return f'{src}/melbourne.geojson'
elif dataset_name == 'melbourne_schools':
return f'{src}/melbourne-schools.geojson'
elif dataset_name == 'san_francisco':
return f'{src}/san-francisco.geojson'
elif dataset_name == 'san_francisco_street_trees_sample':
return f'{src}/san-francisco-street-trees-sample.geojson'
elif dataset_name == 'california_congressional_districts':
return f'{src}/california-congressional-districts.geojson'
else:
raise ValueError(
f'The dataset_name value {dataset_name!r} is not in the list of valid names.'
)
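# Usage sketch (illustrative, not part of the original module): geopandas can read the
# returned URL directly, e.g.
#   import geopandas as gpd
#   gdf = gpd.read_file(get_path('usa_cities'))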
|
FWCore/MessageService/test/u20_cfg.py | ckamtsikis/cmssw | 852 | 12688903 | # Unit test configuration file for MessageLogger service
# Uses include MessageLogger.cfi and nothing else except time stamp suppression
# Currently output will be jumbled unless cout and cerr are directed separately
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.MessageService.test.Services_cff")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.default = cms.untracked.PSet(
noTimeStamps = cms.untracked.bool(True)
)
process.MessageLogger.cerr.noTimeStamps = True
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.sendSomeMessages = cms.EDAnalyzer("UnitTestClient_G")
process.p = cms.Path(process.sendSomeMessages)
|
Skype4Py/api/posix_x11.py | low456high/Skype4Py | 199 | 12688914 |
"""
Low level *Skype for Linux* interface implemented using *XWindows messaging*.
Uses direct *Xlib* calls through *ctypes* module.
This module handles the options that you can pass to `Skype.__init__`
for Linux machines when the transport is set to *X11*.
No further options are currently supported.
Warning PyGTK framework users
=============================
The multithreaded architecture of Skype4Py requires a special treatment
if the Xlib transport is combined with PyGTK GUI framework.
The following code has to be called at the top of your script, before
PyGTK is even imported.
.. python::
from Skype4Py.api.posix_x11 import threads_init
threads_init()
This function enables multithreading support in Xlib and GDK. If not done
here, this is enabled for Xlib library when the `Skype` object is instantiated.
If your script imports the PyGTK module, doing this so late may lead to a
segmentation fault when the GUI is shown on the screen.
A remedy is to enable the multithreading support before PyGTK is imported
by calling the ``threads_init`` function.
"""
__docformat__ = 'restructuredtext en'
import sys
import threading
import os
from ctypes import *
from ctypes.util import find_library
import time
import logging
from Skype4Py.api import Command, SkypeAPIBase, \
timeout2float, finalize_opts
from Skype4Py.enums import *
from Skype4Py.errors import SkypeAPIError
__all__ = ['SkypeAPI', 'threads_init']
# The Xlib Programming Manual:
# ============================
# http://tronche.com/gui/x/xlib/
# some Xlib constants
PropertyChangeMask = 0x400000
PropertyNotify = 28
ClientMessage = 33
PropertyNewValue = 0
PropertyDelete = 1
# some Xlib types
c_ulong_p = POINTER(c_ulong)
DisplayP = c_void_p
Atom = c_ulong
AtomP = c_ulong_p
XID = c_ulong
Window = XID
Bool = c_int
Status = c_int
Time = c_ulong
c_int_p = POINTER(c_int)
# should the structures be aligned to 8 bytes?
align = (sizeof(c_long) == 8 and sizeof(c_int) == 4)
# some Xlib structures
class XClientMessageEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('serial', c_ulong),
('send_event', Bool),
('pad1', c_int),
('display', DisplayP),
('window', Window),
('message_type', Atom),
('format', c_int),
('pad2', c_int),
('data', c_char * 20)]
else:
_fields_ = [('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', DisplayP),
('window', Window),
('message_type', Atom),
('format', c_int),
('data', c_char * 20)]
class XPropertyEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('serial', c_ulong),
('send_event', Bool),
('pad1', c_int),
('display', DisplayP),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int),
('pad2', c_int)]
else:
_fields_ = [('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', DisplayP),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int)]
class XErrorEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('display', DisplayP),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte)]
else:
_fields_ = [('type', c_int),
('display', DisplayP),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte)]
class XEvent(Union):
if align:
_fields_ = [('type', c_int),
('xclient', XClientMessageEvent),
('xproperty', XPropertyEvent),
('xerror', XErrorEvent),
('pad', c_long * 24)]
else:
_fields_ = [('type', c_int),
('xclient', XClientMessageEvent),
('xproperty', XPropertyEvent),
('xerror', XErrorEvent),
('pad', c_long * 24)]
XEventP = POINTER(XEvent)
if getattr(sys, 'skype4py_setup', False):
# we get here if we're building docs; to let the module import without
# exceptions, we emulate the X11 library using a class:
class X(object):
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
pass
x11 = X()
else:
# load X11 library (Xlib)
libpath = find_library('X11')
if not libpath:
raise ImportError('Could not find X11 library')
x11 = cdll.LoadLibrary(libpath)
del libpath
# setup Xlib function prototypes
x11.XCloseDisplay.argtypes = (DisplayP,)
x11.XCloseDisplay.restype = None
x11.XCreateSimpleWindow.argtypes = (DisplayP, Window, c_int, c_int, c_uint,
c_uint, c_uint, c_ulong, c_ulong)
x11.XCreateSimpleWindow.restype = Window
x11.XDefaultRootWindow.argtypes = (DisplayP,)
x11.XDefaultRootWindow.restype = Window
x11.XDeleteProperty.argtypes = (DisplayP, Window, Atom)
x11.XDeleteProperty.restype = None
x11.XDestroyWindow.argtypes = (DisplayP, Window)
x11.XDestroyWindow.restype = None
x11.XFree.argtypes = (c_void_p,)
x11.XFree.restype = None
x11.XGetAtomName.argtypes = (DisplayP, Atom)
x11.XGetAtomName.restype = c_void_p
x11.XGetErrorText.argtypes = (DisplayP, c_int, c_char_p, c_int)
x11.XGetErrorText.restype = None
x11.XGetWindowProperty.argtypes = (DisplayP, Window, Atom, c_long, c_long, Bool,
Atom, AtomP, c_int_p, c_ulong_p, c_ulong_p, POINTER(POINTER(Window)))
x11.XGetWindowProperty.restype = c_int
x11.XInitThreads.argtypes = ()
x11.XInitThreads.restype = Status
x11.XInternAtom.argtypes = (DisplayP, c_char_p, Bool)
x11.XInternAtom.restype = Atom
x11.XNextEvent.argtypes = (DisplayP, XEventP)
x11.XNextEvent.restype = None
x11.XOpenDisplay.argtypes = (c_char_p,)
x11.XOpenDisplay.restype = DisplayP
x11.XPending.argtypes = (DisplayP,)
x11.XPending.restype = c_int
x11.XSelectInput.argtypes = (DisplayP, Window, c_long)
x11.XSelectInput.restype = None
x11.XSendEvent.argtypes = (DisplayP, Window, Bool, c_long, XEventP)
x11.XSendEvent.restype = Status
x11.XLockDisplay.argtypes = (DisplayP,)
x11.XLockDisplay.restype = None
x11.XUnlockDisplay.argtypes = (DisplayP,)
x11.XUnlockDisplay.restype = None
def threads_init(gtk=True):
"""Enables multithreading support in Xlib and PyGTK.
See the module docstring for more info.
:Parameters:
gtk : bool
May be set to False to skip the PyGTK module.
"""
# enable X11 multithreading
x11.XInitThreads()
if gtk:
from gtk.gdk import threads_init
threads_init()
class SkypeAPI(SkypeAPIBase):
def __init__(self, opts):
self.logger = logging.getLogger('Skype4Py.api.posix_x11.SkypeAPI')
SkypeAPIBase.__init__(self)
finalize_opts(opts)
# initialize threads if not done already by the user
threads_init(gtk=False)
# init Xlib display
self.disp = x11.XOpenDisplay(None)
if not self.disp:
raise SkypeAPIError('Could not open XDisplay')
self.win_root = x11.XDefaultRootWindow(self.disp)
self.win_self = x11.XCreateSimpleWindow(self.disp, self.win_root,
100, 100, 100, 100, 1, 0, 0)
x11.XSelectInput(self.disp, self.win_root, PropertyChangeMask)
self.win_skype = self.get_skype()
ctrl = 'SKYPECONTROLAPI_MESSAGE'
self.atom_msg = x11.XInternAtom(self.disp, ctrl, False)
self.atom_msg_begin = x11.XInternAtom(self.disp, ctrl + '_BEGIN', False)
self.loop_event = threading.Event()
self.loop_timeout = 0.0001
self.loop_break = False
def __del__(self):
if x11:
if hasattr(self, 'disp'):
if hasattr(self, 'win_self'):
x11.XDestroyWindow(self.disp, self.win_self)
x11.XCloseDisplay(self.disp)
def run(self):
self.logger.info('thread started')
# main loop
event = XEvent()
data = ''
while not self.loop_break and x11:
while x11.XPending(self.disp):
self.loop_timeout = 0.0001
x11.XNextEvent(self.disp, byref(event))
# events we get here are already prefiltered by the predicate function
if event.type == ClientMessage:
if event.xclient.format == 8:
if event.xclient.message_type == self.atom_msg_begin:
data = str(event.xclient.data)
elif event.xclient.message_type == self.atom_msg:
if data != '':
data += str(event.xclient.data)
else:
self.logger.warning('Middle of Skype X11 message received with no beginning!')
else:
continue
if len(event.xclient.data) != 20 and data:
self.notify(data.decode('utf-8'))
data = ''
elif event.type == PropertyNotify:
namep = x11.XGetAtomName(self.disp, event.xproperty.atom)
is_inst = (c_char_p(namep).value == '_SKYPE_INSTANCE')
x11.XFree(namep)
if is_inst:
if event.xproperty.state == PropertyNewValue:
self.win_skype = self.get_skype()
# changing attachment status can cause an event handler to be fired, in
# turn it could try to call Attach() and doing this immediately seems to
# confuse Skype (command '#0 NAME xxx' returns '#0 CONNSTATUS OFFLINE' :D);
# to fix this, we give Skype some time to initialize itself
time.sleep(1.0)
self.set_attachment_status(apiAttachAvailable)
elif event.xproperty.state == PropertyDelete:
self.win_skype = None
self.set_attachment_status(apiAttachNotAvailable)
self.loop_event.wait(self.loop_timeout)
if self.loop_event.isSet():
self.loop_timeout = 0.0001
elif self.loop_timeout < 1.0:
self.loop_timeout *= 2
self.loop_event.clear()
self.logger.info('thread finished')
def get_skype(self):
"""Returns Skype window ID or None if Skype not running."""
skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
if not skype_inst:
return
type_ret = Atom()
format_ret = c_int()
nitems_ret = c_ulong()
bytes_after_ret = c_ulong()
winp = pointer(Window())
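# 33 below is the X11 predefined atom XA_WINDOW, i.e. the property type expected here.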
fail = x11.XGetWindowProperty(self.disp, self.win_root, skype_inst,
0, 1, False, 33, byref(type_ret), byref(format_ret),
byref(nitems_ret), byref(bytes_after_ret), byref(winp))
if not fail and format_ret.value == 32 and nitems_ret.value == 1:
return winp.contents.value
def close(self):
self.loop_break = True
self.loop_event.set()
while self.isAlive():
time.sleep(0.01)
SkypeAPIBase.close(self)
def set_friendly_name(self, friendly_name):
SkypeAPIBase.set_friendly_name(self, friendly_name)
if self.attachment_status == apiAttachSuccess:
# reattach with the new name
self.set_attachment_status(apiAttachUnknown)
self.attach()
def attach(self, timeout, wait=True):
if self.attachment_status == apiAttachSuccess:
return
self.acquire()
try:
if not self.isAlive():
try:
self.start()
except AssertionError:
raise SkypeAPIError('Skype API closed')
try:
self.wait = True
t = threading.Timer(timeout2float(timeout), lambda: setattr(self, 'wait', False))
if wait:
t.start()
while self.wait:
self.win_skype = self.get_skype()
if self.win_skype is not None:
break
else:
time.sleep(1.0)
else:
raise SkypeAPIError('Skype attach timeout')
finally:
t.cancel()
command = Command('NAME %s' % self.friendly_name, '', True, timeout)
self.release()
try:
self.send_command(command, True)
finally:
self.acquire()
if command.Reply != 'OK':
self.win_skype = None
self.set_attachment_status(apiAttachRefused)
return
self.set_attachment_status(apiAttachSuccess)
finally:
self.release()
command = Command('PROTOCOL %s' % self.protocol, Blocking=True)
self.send_command(command, True)
self.protocol = int(command.Reply.rsplit(None, 1)[-1])
def is_running(self):
return (self.get_skype() is not None)
def startup(self, minimized, nosplash):
# options are not supported as of Skype 1.4 Beta for Linux
if not self.is_running():
if os.fork() == 0: # we're the child
os.setsid()
os.execlp('skype', 'skype')
def shutdown(self):
from signal import SIGINT
fh = os.popen('ps -o %p --no-heading -C skype')
pid = fh.readline().strip()
fh.close()
if pid:
os.kill(int(pid), SIGINT)
# Skype sometimes doesn't delete the '_SKYPE_INSTANCE' property
skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
if skype_inst:
x11.XDeleteProperty(self.disp, self.win_root, skype_inst)
self.win_skype = None
self.set_attachment_status(apiAttachNotAvailable)
def send_command(self, command, force=False):
if self.attachment_status != apiAttachSuccess and not force:
self.attach(command.Timeout)
self.push_command(command)
self.notifier.sending_command(command)
cmd = u'#%d %s' % (command.Id, command.Command)
self.logger.debug('sending %s', repr(cmd))
if command.Blocking:
command._event = bevent = threading.Event()
else:
command._timer = timer = threading.Timer(command.timeout2float(), self.pop_command, (command.Id,))
event = XEvent()
event.xclient.type = ClientMessage
event.xclient.display = self.disp
event.xclient.window = self.win_self
event.xclient.message_type = self.atom_msg_begin
event.xclient.format = 8
cmd = cmd.encode('utf-8') + '\x00'
for i in xrange(0, len(cmd), 20):
event.xclient.data = cmd[i:i + 20]
x11.XSendEvent(self.disp, self.win_skype, False, 0, byref(event))
event.xclient.message_type = self.atom_msg
self.loop_event.set()
if command.Blocking:
bevent.wait(command.timeout2float())
if not bevent.isSet():
raise SkypeAPIError('Skype command timeout')
else:
timer.start()
def notify(self, cmd):
self.logger.debug('received %s', repr(cmd))
# Called by main loop for all received Skype commands.
if cmd.startswith(u'#'):
p = cmd.find(u' ')
command = self.pop_command(int(cmd[1:p]))
if command is not None:
command.Reply = cmd[p + 1:]
if command.Blocking:
command._event.set()
else:
command._timer.cancel()
self.notifier.reply_received(command)
else:
self.notifier.notification_received(cmd[p + 1:])
else:
self.notifier.notification_received(cmd)
|
pe_tree/hash_pe.py | lybtongji/pe_tree | 1,271 | 12688928 | #
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Hash all portions of a PE file, this may be run as a separate process"""
# Standard imports
import json
import hashlib
import pefile
def hash_data(data, entropy_H):
"""Calculate MD5/SHA1/SHA256 of given data
Args:
data (bytes): Data to calculate hashes/entropy
entropy_H (pefile.SectionStructure.entropy_H): Callback function for calculating entropy
Returns:
dict: Dictionary of hashes/entropy
"""
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
md5.update(data)
sha1.update(data)
sha256.update(data)
return {"md5": md5.hexdigest(), "sha1": sha1.hexdigest(), "sha256": sha256.hexdigest(), "entropy": entropy_H(data), "size": len(data)}
def hash_pe_file(filename, data=None, pe=None, json_dumps=True):
"""Calculate PE file hashes.
Either call directly or invoke via processpool::
processpool = multiprocessing.Pool(10)
hashes = json.loads(processpool.apply_async(pe_tree.hash_pe.hash_pe_file, (filename,)).get())
Args:
filename (str): Path to file to hash (or specify via data)
data (bytes, optional): PE file data
pe (pefile.PE, optional): Parsed PE file
json_dumps (bool, optional): Return data as JSON
Returns:
dict: PE file hashes if json_dumps == False
str: JSON PE file hashes if json_dumps == True
"""
if pe is None:
pe = pefile.PE(filename)
# Calculate entropy (use pefile implementation!)
entropy_H = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__, pe=pe).entropy_H
file_hashes = {"file": {"md5": "", "sha1": "", "sha256": "", "entropy": 0.0, "size": 0},
"file_no_overlay": {"md5": "", "sha1": "", "sha256": "", "entropy": 0.0, "size": 0},
"dos_stub": {"md5": "", "sha1": "", "sha256": "", "entropy": 0.0, "size": 0},
"sections": [],
"resources": [],
"security_directory": {"md5": "", "sha1": "", "sha256": "", "entropy": 0.0, "size": 0},
"overlay": {"md5": "", "sha1": "", "sha256": "", "entropy": 0.0, "size": 0}}
if not data:
with open(filename, "rb") as f:
data = f.read()
# Hash entire file
file_hashes["file"] = hash_data(data, entropy_H)
# Hash DOS stub
if pe.DOS_HEADER.e_lfanew > 64:
file_hashes["dos_stub"] = hash_data(data[64:pe.DOS_HEADER.e_lfanew], entropy_H)
# Hash sections
for section in pe.sections:
file_hashes["sections"].append({"md5": section.get_hash_md5(), "sha256": section.get_hash_sha256(), "entropy": section.get_entropy()})
# Hash resources
if hasattr(pe, "DIRECTORY_ENTRY_RESOURCE"):
mapped_data = pe.get_memory_mapped_image()
for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
if not hasattr(resource_type, "directory"):
continue
for resource_id in resource_type.directory.entries:
if not hasattr(resource_id, "directory"):
continue
for resource_language in resource_id.directory.entries:
if not hasattr(resource_language, "data"):
continue
offset = resource_language.data.struct.OffsetToData
size = resource_language.data.struct.Size
try:
resource_data = mapped_data[offset:offset + size]
except:
resource_data = ""
file_hashes["resources"].append(hash_data(resource_data, entropy_H))
overlay_offset = pe.get_overlay_data_start_offset()
if overlay_offset:
overlay_data = pe.get_overlay()
security = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_SECURITY"]]
if security.VirtualAddress != 0 and security.Size != 0:
size = min(security.Size, len(overlay_data))
# Hash security directory
file_hashes["security_directory"] = hash_data(overlay_data[:size], entropy_H)
overlay_data = overlay_data[size:]
overlay_offset += size
# Hash overlay
file_hashes["overlay"] = hash_data(overlay_data, entropy_H)
file_hashes["file_no_overlay"] = hash_data(data[overlay_offset:], entropy_H)
# Return JSON
if json_dumps:
return json.dumps(file_hashes)
# Return dict
return file_hashes
|
recipes/Python/578882_Monitor_Progress_File_Descriptors_Another/recipe-578882.py | tdiprima/code | 2,023 | 12688929 | #!/usr/bin/env python
#
# fdprogress.py -- by Alfe (<EMAIL>), inspired by azat@Stackoverflow
#
# usage: fdprogress.py <pid>
#
import time, os, os.path
from collections import defaultdict
def getFds(pid):
return os.listdir('/proc/%s/fd/' % pid)
def getPos(pid, fd):
with open('/proc/%s/fdinfo/%s' % (pid, fd)) as f:
return int(f.readline()[5:])
def getSize(pid, fd):
return os.path.getsize(getPath(pid, fd))
class FdIsPipe(Exception): pass
def getPath(pid, fd):
result = os.readlink('/proc/%s/fd/%s' % (pid, fd))
if result.startswith('pipe:['):
raise FdIsPipe(result)
return result
def extendHistory(history, pid):
for fd in getFds(pid):
try:
history[fd, getPath(pid, fd)].append(
(time.time(), getPos(pid, fd), getSize(pid, fd)))
except FdIsPipe:
pass # ignore fds to pipe
def initHistory(pid):
result = defaultdict(list)
extendHistory(result, pid)
return result
def reduceHistory(history):
for key, value in history.iteritems():
if len(value) > 2:
del value[1:-2] # only keep first and last
# (this can be more clever in the future)
def entryPrediction(fd, path, values):
t1, pos1, size1 = values[0]
t2, pos2, size2 = values[-1]
if t1 == t2: # no time passed yet?
return fd, path, (t2, pos2, size2), None, None, None, None, None, None, None
growth = (size2 - size1) / (t2 - t1) # bytes/sec growth of file
if growth != 0:
tSize0 = t1 - size1 / growth # time when size was 0
else:
tSize0 = None
speed = (pos2 - pos1) / (t2 - t1) # speed of pos in bytes/sec
if speed != 0:
tPos0 = t1 - pos1 / speed # time when pos was 0
tPosSize2 = t1 + (size2 - pos1) / speed # time of pos reaching size2
else:
tPos0 = tPosSize2 = None
if speed != growth: # when will both meet?
tm = t2 + (size2 - pos2) / (speed - growth)
sizeM = size2 + growth * (tm - t2)
else:
tm = sizeM = None
return (fd, path, (t2, pos2, size2), growth, speed, tSize0, tPos0,
tPosSize2, tm, sizeM)
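# Worked example of the meet-time arithmetic above (illustrative, not in the original
# recipe): if at t2 the file is size2=100 MB with pos2=40 MB already read, the file grows at
# growth=1 MB/s and the reader advances at speed=4 MB/s, then
#   tm = t2 + (100 - 40) / (4 - 1) = t2 + 20 s, and sizeM = 100 + 1 * 20 = 120 MB.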
def eachPrediction(history):
for (fd, path), values in history.iteritems():
yield entryPrediction(fd, path, values)
def displayTime(t):
if t is None:
return "<>"
d = t - time.time()
try:
lt = time.localtime(t)
except:
return "??"
return (
time.strftime("%%F (now%+dy)" % (d/86400/365), lt)
if abs(d) > 2 * 86400 * 365 else
time.strftime("%%F (now%+dM)" % (d/86400/30), lt)
if abs(d) > 2 * 86400 * 30 else
time.strftime("%%F (now%+dd)" % (d/86400), lt)
if abs(d) > 2 * 86400 else
time.strftime("%%a, %%T (now%+dh)" % (d/3600), lt)
if time.strftime('%F', lt) != time.strftime('%F', time.localtime()) else
time.strftime("%%T (now%+dh)" % (d/3600), lt)
if abs(d) > 2 * 3600 else
time.strftime("%%T (now%+dm)" % (d/60), lt)
if abs(d) > 2 * 60 else
time.strftime("%%T (now%+ds)" % d, lt))
def displaySize(size):
return (
"<>" if size is None else
"%d B" % size
if size < 1e3 else
"%.2f kB" % (size / 1e3)
if size < 1e6 else
"%.2f MB" % (size / 1e6)
if size < 1e9 else
"%.2f GB" % (size / 1e9))
def displaySpeed(speed):
return displaySize(speed) + "/s"
def printPrediction(history):
for (fd, path, (t2, pos2, size2), growth, speed, tSize0, tPos0,
tPosSize2, tm, sizeM) in eachPrediction(history):
print '\n', fd, "->", os.path.basename(path)
dT = displayTime
dSi = displaySize
dSp = displaySpeed
print "size:", dSi(size2), "\tgrowth:", dSp(growth), \
"\t\tpos:", dSi(pos2), "\tspeed:", dSp(speed)
print "emptyTime:", dT(tSize0), "\tstartTime:", dT(tPos0), \
"\treachTime:", dT(tPosSize2), "\tmeetTime:", dT(tm)
def main(argv):
pid = argv[1]
history = initHistory(pid)
while True:
os.system('clear')
printPrediction(history)
extendHistory(history, pid)
reduceHistory(history)
time.sleep(1.0)
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
DQMOffline/Trigger/python/PhotonMonitor_cfi.py | ckamtsikis/cmssw | 852 | 12688940 | import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.photonMonitoring_cfi import photonMonitoring
hltPhotonmonitoring = photonMonitoring.clone()
hltPhotonmonitoring.FolderName = cms.string('HLT/Photon/Photon200/')
hltPhotonmonitoring.histoPSet.lsPSet = cms.PSet(
nbins = cms.uint32 ( 250 ),
xmin = cms.double( 0.),
xmax = cms.double( 2500.),
)
hltPhotonmonitoring.histoPSet.photonPSet = cms.PSet(
nbins = cms.uint32( 500 ),
xmin = cms.double( 0.0),
xmax = cms.double(5000),
)
hltPhotonmonitoring.met = cms.InputTag("pfMetEI") # pfMet
hltPhotonmonitoring.jets = cms.InputTag("pfJetsEI") # ak4PFJets, ak4PFJetsCHS
hltPhotonmonitoring.electrons = cms.InputTag("gedGsfElectrons") # while pfIsolatedElectronsEI are reco::PFCandidate !
hltPhotonmonitoring.photons = cms.InputTag("gedPhotons") # while pfIsolatedElectronsEI are reco::PFCandidate !
hltPhotonmonitoring.numGenericTriggerEventPSet.andOr = cms.bool( False )
#hltPhotonmonitoring.numGenericTriggerEventPSet.dbLabel = cms.string("ExoDQMTrigger") # it does not exist yet, we should consider the possibility of using the DB, but as it is now it will need a label per path !
hltPhotonmonitoring.numGenericTriggerEventPSet.andOrHlt = cms.bool(True)# True:=OR; False:=AND
hltPhotonmonitoring.numGenericTriggerEventPSet.hltInputTag = cms.InputTag( "TriggerResults::HLT" )
hltPhotonmonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Photon175_v*") # HLT_ZeroBias_v*
#hltPhotonmonitoring.numGenericTriggerEventPSet.hltDBKey = cms.string("EXO_HLT_MET")
hltPhotonmonitoring.numGenericTriggerEventPSet.errorReplyHlt = cms.bool( False )
hltPhotonmonitoring.numGenericTriggerEventPSet.verbosityLevel = cms.uint32(1)
hltPhotonmonitoring.denGenericTriggerEventPSet.andOr = cms.bool( False )
hltPhotonmonitoring.denGenericTriggerEventPSet.andOrHlt = cms.bool( True )
hltPhotonmonitoring.denGenericTriggerEventPSet.hltInputTag = cms.InputTag( "TriggerResults::HLT" )
hltPhotonmonitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_PFJet40_v*","HLT_PFJet60_v*","HLT_PFJet80_v*") # HLT_ZeroBias_v*
hltPhotonmonitoring.denGenericTriggerEventPSet.errorReplyHlt = cms.bool( False )
hltPhotonmonitoring.denGenericTriggerEventPSet.dcsInputTag = cms.InputTag( "scalersRawToDigi" )
hltPhotonmonitoring.denGenericTriggerEventPSet.dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ) # 24-27: strip, 28-29: pixel, we should add all other detectors !
hltPhotonmonitoring.denGenericTriggerEventPSet.andOrDcs = cms.bool( False )
hltPhotonmonitoring.denGenericTriggerEventPSet.errorReplyDcs = cms.bool( True )
hltPhotonmonitoring.denGenericTriggerEventPSet.verbosityLevel = cms.uint32(1)
|
var/spack/repos/builtin/packages/ruby-erubis/package.py | kkauder/spack | 2,360 | 12688959 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *  # standard import for builtin Spack packages; provides RubyPackage, version and filter_file
class RubyErubis(RubyPackage):
"""Erubis is a fast, secure, and very extensible implementation of eRuby.
"""
homepage = "http://www.kuwata-lab.com/erubis/"
git = "https://github.com/kwatch/erubis.git"
version('master', branch='master')
version('2.7.0', commit='<PASSWORD>')
def patch(self):
filter_file('$Release$', str(self.version),
'erubis.gemspec', string=True)
|
albu-solution/src/pytorch_utils/concrete_eval.py | Hulihrach/RoadDetector | 180 | 12688960 | import os
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
from .eval import Evaluator
class FullImageEvaluator(Evaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_batch(self, predicted, model, data, prefix=""):
names = data['image_name']
for i in range(len(names)):
self.on_image_constructed(names[i], predicted[i,...], prefix)
def save(self, name, prediction, prefix=""):
cv2.imwrite(os.path.join(self.save_dir, prefix + name), (prediction * 255).astype(np.uint8))
class CropEvaluator(Evaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.current_mask = None
self.current_prediction = None
self.current_image_name = None
def process_batch(self, predicted, model, data, prefix=""):
names = data['image_name']
config = self.config
batch_geometry = self.parse_geometry(data['geometry'])
for i in range(len(names)):
name = names[i]
geometry = batch_geometry[i]
sx, sy = geometry['sx'], geometry['sy']
pred = self.cut_border(np.squeeze(predicted[i,...]))
if name != self.current_image_name:
if self.current_image_name is None:
self.current_image_name = name
else:
self.on_image_constructed(self.current_image_name, self.current_prediction / self.current_mask, prefix=prefix)
self.construct_big_image(geometry)
self.current_prediction[sy + self.border:sy + config.target_rows - self.border, sx + self.border:sx + config.target_cols - self.border] += pred
self.current_mask[sy+self.border:sy + config.target_rows - self.border, sx + self.border:sx + config.target_cols - self.border] += 1
self.current_image_name = name
def parse_geometry(self, batch_geometry):
rows = batch_geometry['rows'].numpy()
cols = batch_geometry['cols'].numpy()
sx = batch_geometry['sx'].numpy()
sy = batch_geometry['sy'].numpy()
geometries = []
for idx in range(rows.shape[0]):
geometry = {'rows': rows[idx],
'cols': cols[idx],
'sx': sx[idx],
'sy': sy[idx]}
geometries.append(geometry)
return geometries
def construct_big_image(self, geometry):
self.current_mask = np.zeros((geometry['rows'], geometry['cols']), np.uint8)
self.current_prediction = np.zeros((geometry['rows'], geometry['cols']), np.float32)
def save(self, name, prediction, prefix=""):
cv2.imwrite(os.path.join(self.save_dir, prefix + name), (prediction * 255).astype(np.uint8))
def post_predict_action(self, prefix):
self.on_image_constructed(self.current_image_name, self.current_prediction / self.current_mask, prefix=prefix)
self.current_image_name = None
|
dl_coursera/markup.py | nodamu/dl_coursera | 111 | 12688964 | import logging
import copy
import re
from urllib.parse import quote
import bs4
import jinja2
from .resource import load_resource
from .define import URL_ROOT
def _is_root(e):
return e.parent is None
def _is_tag(e):
return isinstance(e, bs4.Tag)
def _has_no_child(tag):
return len(tag.contents) == 0
class Traversal:
'''depth-first traversal
'''
def __init__(self, root, *, tagOnly=False):
assert _is_root(root)
self._e = root
self._skip_children = False
self._tagOnly = tagOnly
def __iter__(self):
return self
def __next__(self):
e = self._next()
if e is None:
raise StopIteration('no more element')
self._e = e
self._skip_children = False
return e
def _next(self):
if (not self._skip_children) and _is_tag(self._e):
for e in self._e.children:
if (not self._tagOnly) or _is_tag(e):
return e
e = self._e
while True:
if _is_root(e):
return None
for _e in e.next_siblings:
if (not self._tagOnly) or _is_tag(_e):
return _e
e = e.parent
def skip_children(self):
self._skip_children = True
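# Usage sketch (illustrative, not part of the original module; assumes `soup` is a
# bs4.BeautifulSoup root), mirroring how CML uses this class below:
#   tr = Traversal(soup, tagOnly=True)
#   for tag in tr:
#       if tag.name == 'code':
#           tr.skip_children()  # don't descend into code blocks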
class CML:
def __init__(self, doc):
doc = doc.translate(doc.maketrans('\u21b5', ' '))
self._root = bs4.BeautifulSoup(doc, 'lxml-xml')
self._assets = None
self._assetIDs = None
self._refids = None
self._html = None
def get_resources(self, *, _pat_ref=re.compile(r'%s/learn/[^/]+/resources/([0-9a-zA-Z-]+)' % URL_ROOT)):
if self._assets is None:
self._assets = []
self._assetIDs = []
self._refids = []
for e in Traversal(self._root, tagOnly=True):
if e.name == 'asset':
self._assetIDs.append(e['id'])
elif e.name == 'img':
if e.get('src'):
import uuid
from .lib.misc import url_basename
from .define import Asset
id_ = str(uuid.uuid4()); e['assetId'] = id_
url = e['src']
name = url_basename(url)
self._assets.append(Asset(id_=id_, url=url, name=name))
else:
self._assetIDs.append(e['assetId'])
elif e.name == 'a':
match = _pat_ref.match(e['href'])
if match:
_ = match.group(1)
self._refids.append(_)
e['refid'] = _
return self._assets, self._assetIDs, self._refids
def to_html(self, *, assets):
if self._html is not None:
return self._html
asset_by_id = {_['id']: _ for _ in assets}
def _assetName(id_):
return asset_by_id[id_]['name']
html = bs4.BeautifulSoup('', 'lxml')
d = {}
def _add(e0, e1):
parent1 = html
_e = e0
while _e is not None:
if id(_e) in d:
parent1 = d[id(_e)]
break
_e = _e.parent
if (parent1 is html) and (not _is_tag(e0)):
return
if _is_tag(e0):
d[id(e0)] = e1
parent1.append(e1)
tr = Traversal(self._root)
for e0 in tr:
if isinstance(e0, bs4.NavigableString):
_li = str(e0).split('$$')
hasMath = False
for _ in _li:
if not hasMath:
_add(e0, _)
else:
_span = bs4.Tag(name='span')
_span['hasMath'] = 'true'
_span.append(_)
_add(e0, _span)
hasMath = not hasMath
continue
if not _is_tag(e0):
continue
if e0.name == 'asset':
assert _has_no_child(e0)
e1 = bs4.Tag(name='p')
e1['class'] = 'asset'
e1.append(_assetName(e0['id']))
elif e0.name == 'img':
assert _has_no_child(e0)
e1 = bs4.Tag(name='img')
e1['src'] = e1['alt'] = _assetName(e0['assetId'])
e1['src'] = quote(e1['src'])
elif e0.name == 'heading':
e1 = bs4.Tag(name='h%d' % int(e0['level']))
elif e0.name == 'text':
e1 = bs4.Tag(name='p')
elif e0.name == 'list':
bulletType = e0['bulletType']
if bulletType == 'numbers':
e1 = bs4.Tag(name='ol')
e1['type'] = '1'
elif bulletType == 'bullets':
e1 = bs4.Tag(name='ul')
else:
e1 = bs4.Tag(name='ul')
logging.warning('[CML] unknown bulletType=%s' % bulletType)
elif e0.name == 'a':
e1 = bs4.Tag(name='a')
e1['href'] = e0['href']
if e0.get('refid'):
e1['refid'] = e0['refid']
elif e0.name == 'code':
e1 = bs4.Tag(name='pre')
e1.append(copy.copy(e0))
tr.skip_children()
elif e0.name in ['li', 'strong', 'em', 'u', 'table', 'tr', 'td', 'th', 'sup', 'sub']:
e1 = bs4.Tag(name=e0.name)
elif e0.name in ['co-content']:
continue
else:
logging.warning('[CML] unknown e0.name=%s\n%s' % (e0.name, e0))
continue
_add(e0, e1)
self._html = str(html)
return self._html
def render_supplement(*, content, resource_path, title='', __={}):
if __.get('template') is None:
__['template'] = jinja2.Template(load_resource('template/supplement.html').decode('UTF-8'))
return __['template'].render(content=content, resource_path=resource_path, title=title)
|
tests/utils/object_factory.py | chschroeder/small-text | 218 | 12688991 |
import numpy as np
from small_text.active_learner import PoolBasedActiveLearner
def get_initialized_active_learner(clf_factory, query_strategy, dataset):
active_learner = PoolBasedActiveLearner(clf_factory, query_strategy, dataset)
x_indices_initial = np.random.choice(np.arange(len(dataset)), size=10, replace=False)
y_initial = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])
active_learner.initialize_data(x_indices_initial, y_initial)
return active_learner
|
tests/st/ops/ascend/test_drop_out_gen_mask.py | GuoSuiming/mindspore | 3,200 | 12688996 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE,
device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.mask = P.DropoutGenMask(10, 28)
self.shape = P.Shape()
def construct(self, x_, y_):
shape_x = self.shape(x_)
return self.mask(shape_x, y_)
x = np.ones([2, 4, 2, 2]).astype(np.int32)
y = np.array([1.0]).astype(np.float32)
def test_net():
mask = Net()
tx, ty = Tensor(x), Tensor(y)
output = mask(tx, ty)
print(output.asnumpy())
assert ([255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255] == output.asnumpy()).all()
|
azure-kusto-data/tests/test_converter.py | artunduman/azure-kusto-python | 134 | 12689003 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
import unittest
from datetime import timedelta
from azure.kusto.data._converters import to_datetime, to_timedelta
class ConverterTests(unittest.TestCase):
"""These are unit tests that should test custom converters used in."""
def test_to_timestamp(self):
"""Happy path to test converter from TimeSpan to timedelta."""
# Test hours, minutes and seconds
assert to_timedelta("00:00:00") == timedelta(seconds=0)
assert to_timedelta("00:00:03") == timedelta(seconds=3)
assert to_timedelta("00:04:03") == timedelta(minutes=4, seconds=3)
assert to_timedelta("02:04:03") == timedelta(hours=2, minutes=4, seconds=3)
# Test milliseconds
assert to_timedelta("00:00:00.099") == timedelta(milliseconds=99)
assert to_timedelta("02:04:03.0123") == timedelta(hours=2, minutes=4, seconds=3, microseconds=12300)
# Test days
assert to_timedelta("01.00:00:00") == timedelta(days=1)
assert to_timedelta("02.04:05:07") == timedelta(days=2, hours=4, minutes=5, seconds=7)
# Test negative
assert to_timedelta("-01.00:00:00") == -timedelta(days=1)
assert to_timedelta("-02.04:05:07") == -timedelta(days=2, hours=4, minutes=5, seconds=7)
# Test all together
assert to_timedelta("00.00:00:00.000") == timedelta(seconds=0)
assert to_timedelta("02.04:05:07.789") == timedelta(days=2, hours=4, minutes=5, seconds=7, milliseconds=789)
assert to_timedelta("03.00:00:00.111") == timedelta(days=3, milliseconds=111)
# Test from Ticks
assert to_timedelta(-80080008) == timedelta(microseconds=-8008001)
assert to_timedelta(10010001) == timedelta(microseconds=1001000)
def test_to_timestamp_fail(self):
"""
Sad path to test TimeSpan to timedelta converter
"""
self.assertRaises(ValueError, to_timedelta, "")
self.assertRaises(ValueError, to_timedelta, "foo")
self.assertRaises(ValueError, to_timedelta, "00")
self.assertRaises(ValueError, to_timedelta, "00:00")
self.assertRaises(ValueError, to_timedelta, "03.00:00:00.")
self.assertRaises(ValueError, to_timedelta, "03.00:00:00.111a")
def test_to_datetime(self):
"""Tests datetime read by KustoResultIter"""
assert to_datetime("2016-06-07T16:00:00Z") is not None
def test_to_datetime_fail(self):
"""Tests that invalid strings fails to convert to datetime"""
self.assertRaises(ValueError, to_datetime, "invalid")
|
tests/components/panel_custom/__init__.py | domwillcode/home-assistant | 30,023 | 12689019 |
"""Tests for the panel_custom component."""
|
examples/implicit_orientation_learning/processors.py | niqbal996/paz | 300 | 12689041 |
from paz.abstract import Processor
import numpy as np
class MakeDictionary(Processor):
def __init__(self, encoder, renderer):
super(MakeDictionary, self).__init__()
self.latent_dimension = encoder.encoder.output_shape[1]
self.encoder = encoder
self.renderer = renderer
def call(self):
data = self.renderer.render()
dictionary = {}
latent_vectors = np.zeros((len(data), self.latent_dimension))
for sample_arg, sample in enumerate(data):
image = sample['image']
latent_vectors[sample_arg] = self.encoder(image)
dictionary[sample_arg] = image
dictionary['latent_vectors'] = latent_vectors
return dictionary
class MeasureSimilarity(Processor):
def __init__(self, dictionary, measure):
super(MeasureSimilarity, self).__init__()
self.dictionary = dictionary
self.measure = measure
def call(self, latent_vector):
latent_vectors = self.dictionary['latent_vectors']
measurements = self.measure(latent_vectors, latent_vector)
closest_image = self.dictionary[np.argmax(measurements)]
return latent_vector, closest_image
|
psdaq/psdaq/pyxpm/surf/devices/silabs/_Si5345Lite.py | ZhenghengLi/lcls2 | 134 | 12689047 | #-----------------------------------------------------------------------------
# This file is part of 'SLAC Firmware Standard Library'.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of 'SLAC Firmware Standard Library', including this file,
# may be copied, modified, propagated, or distributed except according to
# the terms contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.devices.silabs as silabs
import csv
import click
import fnmatch
import rogue
class Si5345Lite(pr.Device):
def __init__(self,
simpleDisplay = True,
advanceUser = False,
liteVersion = True,
**kwargs):
self._useVars = rogue.Version.greaterThanEqual('5.4.0')
if self._useVars:
size = 0
else:
size = (0x1000 << 2) # 16KB
super().__init__(size=size, **kwargs)
self.add(pr.LocalVariable(
name = "CsvFilePath",
description = "Used if command's argument is empty",
mode = "RW",
value = "",
))
##############################
# Commands
##############################
@self.command(value='',description="Load the .CSV from CBPro.",)
def LoadCsvFile(arg):
# Check if non-empty argument
if (arg != ""):
path = arg
else:
# Use the variable path instead
path = self.CsvFilePath.get()
# Check for .csv file
if fnmatch.fnmatch(path, '*.csv'):
click.secho( f'{self.path}.LoadCsvFile(): {path}', fg='green')
else:
click.secho( f'{self.path}.LoadCsvFile(): {path} is not .csv', fg='red')
return
# Power down during the configuration load
self.Page0.PDN.set(True)
# Open the .CSV file
with open(path) as csvfile:
reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
# Loop through the rows in the CSV file
for row in reader:
if (row[0]!='Address'):
self._setValue(
offset = (int(row[0],16)<<2),
data = int(row[1],16),
)
# Update local RemoteVariables and verify configuration
self.readBlocks(recurse=True)
self.checkBlocks(recurse=True)
# Execute the Page5.BW_UPDATE_PLL command
self.Page5.BW_UPDATE_PLL()
# Power Up after the configuration load
self.Page0.PDN.set(False)
# Clear the internal error flags
self.Page0.ClearIntErrFlag()
##############################
# Pages
##############################
self._pages = {
0: silabs.Si5345Page0(offset=(0x000<<2),simpleDisplay=simpleDisplay,expand=False), # 0x0000 - 0x03FF
1: silabs.Si5345Page1(offset=(0x100<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x0400 - 0x07FF
2: silabs.Si5345Page2(offset=(0x200<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x0800 - 0x0BFF
3: silabs.Si5345Page3(offset=(0x300<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x0C00 - 0x0FFF
4: silabs.Si5345Page4(offset=(0x400<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x1000 - 0x13FF
5: silabs.Si5345Page5(offset=(0x500<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x1400 - 0x17FF
6: silabs.Si5345PageBase(name='Page6',offset=(0x600<<2),expand=False,hidden=not(advanceUser)), # 0x1800 - 0x1BFF
7: silabs.Si5345PageBase(name='Page7',offset=(0x700<<2),expand=False,hidden=not(advanceUser)), # 0x1C00 - 0x1FFF
8: silabs.Si5345PageBase(name='Page8',offset=(0x800<<2),expand=False,hidden=not(advanceUser)), # 0x2000 - 0x23FF
9: silabs.Si5345Page9(offset=(0x900<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x2400 - 0x27FF
10: silabs.Si5345PageA(offset=(0xA00<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x2800 - 0x2BFF
11: silabs.Si5345PageB(offset=(0xB00<<2),simpleDisplay=simpleDisplay,expand=False,hidden=not(advanceUser),liteVersion=liteVersion), # 0x2C00 - 0x2FFF
}
# Add Pages
for k,v in self._pages.items():
self.add(v)
self.add(pr.LinkVariable(
name = 'Locked',
description = 'Inverse of LOL',
mode = 'RO',
dependencies = [self.Page0.LOL],
linkedGet = lambda: (False if self.Page0.LOL.value() else True)
))
def _setValue(self,offset,data):
if self._useVars:
# Note: index is byte index (not word index)
self._pages[offset // 0x400].DataBlock.set(value=data,index=(offset%0x400)>>2)
else:
self._rawWrite(offset,data) # Deprecated
|
AlphaZero/AlphaMCTS.py | morozig/muzero | 111 | 12689048 | <filename>AlphaZero/AlphaMCTS.py
"""
Contains logic for performing Monte Carlo Tree Search having access to the environment.
The class is an adaptation of AlphaZero-General's MCTS search to accommodate non-adversarial environments (MDPs).
We utilize the MinMax scaling of backed-up rewards for the UCB formula and (by default) compute the UCB using
the formula proposed by the MuZero algorithm. The MCTS returns both the estimated root-value and action probabilities.
The MCTS also discounts backed up rewards given that gamma < 1.
Notes:
- Adapted from https://github.com/suragnair/alpha-zero-general
- Base implementation done.
- Documentation 15/11/2020
"""
import typing
import numpy as np
from AlphaZero.AlphaNeuralNet import AlphaZeroNeuralNet
from utils import DotDict
from utils.selfplay_utils import MinMaxStats, GameHistory, GameState
EPS = 1e-8
class MCTS:
"""
This class handles the MCTS tree while having access to the environment logic.
"""
CANONICAL: bool = False # Whether to compute the UCB formula using AlphaZero's formula (true) or MuZero's formula.
def __init__(self, game, neural_net: AlphaZeroNeuralNet, args: DotDict) -> None:
"""
Initialize all requisite variables for performing MCTS for AlphaZero.
:param game: Game Implementation of Game class for environment logic.
:param neural_net: AlphaNeuralNet Implementation of AlphaNeuralNet class for inference.
:param args: DotDict Data structure containing parameters for the tree search.
"""
self.game = game
self.neural_net = neural_net
self.args = args
# Static helper variables.
self.single_player = game.n_players == 1
self.action_size = game.getActionSize()
# Gets reinitialized at every search
self.minmax = MinMaxStats(self.args.minimum_reward, self.args.maximum_reward)
self.Qsa = {} # stores Q values for s,a (as defined in the paper)
self.Ssa = {} # stores state transitions for s, a
self.Rsa = {} # stores R values for s,a
self.Nsa = {} # stores #times edge s,a was visited
self.Ns = {} # stores #times board s was visited
self.Ps = {} # stores initial policy (returned by neural net)
self.Vs = {} # stores game.getValidMoves for board s
def clear_tree(self) -> None:
""" Clear all statistics stored in the current search tree """
self.Qsa, self.Ssa, self.Rsa, self.Nsa, self.Ns, self.Ps, self.Vs = [{} for _ in range(7)]
def initialize_root(self, state: GameState, trajectory: GameHistory) -> typing.Tuple[bytes, float]:
"""
Perform initial inference for the root state and perturb the network prior with Dirichlet noise.
Additionally mask the illegal moves in the network prior and initialize all statistics for starting the
MCTS search.
:param state: GameState Data structure containing the current state of the environment.
:param trajectory: GameHistory Data structure containing the entire episode trajectory of the agent(s).
:return: tuple (hash, root_value) The hash of the environment state and inferred root-value.
"""
network_input = trajectory.stackObservations(self.neural_net.net_args.observation_length, state.observation)
pi_0, v_0 = self.neural_net.predict(network_input)
s_0 = self.game.getHash(state)
# Add Dirichlet Exploration noise
noise = np.random.dirichlet([self.args.dirichlet_alpha] * len(pi_0))
self.Ps[s_0] = noise * self.args.exploration_fraction + (1 - self.args.exploration_fraction) * pi_0
# Mask the prior for illegal moves, and re-normalize accordingly.
self.Vs[s_0] = self.game.getLegalMoves(state)
self.Ps[s_0] *= self.Vs[s_0]
self.Ps[s_0] = self.Ps[s_0] / np.sum(self.Ps[s_0])
# Sum of visit counts of the edges/ children and legal moves.
self.Ns[s_0] = 0
return s_0, v_0
def compute_ucb(self, s: bytes, a: int, exploration_factor: float) -> float:
"""
Compute the UCB for an edge (s, a) within the MCTS tree:
PUCT(s, a) = MinMaxNormalize(Q(s, a)) + P(s, a) * sqrt(visits_parent / (1 + visits_s)) * exploration_factor
Where the exploration factor is either the exploration term of MuZero (default) or a float c_1.
Illegal edges are returned as zeros. The Q values within the tree are MinMax-normalized using the
statistics accumulated during the current tree search.
:param s: hash Key of the current state inside the MCTS tree.
:param a: int Action key representing the path to reach the child node from path (s, a)
:param exploration_factor: float Pre-computed exploration factor from the MuZero PUCT formula.
:return: float Upper confidence bound with neural network prior
"""
if s in self.Vs and not self.Vs[s][a]:
return 0
visit_count = self.Nsa[(s, a)] if (s, a) in self.Nsa else 0
q_value = self.minmax.normalize(self.Qsa[(s, a)]) if (s, a) in self.Qsa else 0
c_children = np.max([self.Ns[s], 1e-8]) # Ensure that prior doesn't collapse to 0 if s is new.
# Exploration
if self.CANONICAL:
# Standard PUCT formula from the AlphaZero paper
ucb = self.Ps[s][a] * np.sqrt(c_children) / (1 + visit_count) * self.args.c1
else:
# The PUCT formula from the MuZero paper
ucb = self.Ps[s][a] * np.sqrt(c_children) / (1 + visit_count) * exploration_factor
ucb += q_value # Exploitation
return ucb
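# Worked example for the formula above (illustrative numbers only): with a prior
# P(s, a) = 0.25, parent visits Ns[s] = 100, edge visits Nsa[(s, a)] = 9 and a
# MinMax-normalized Q of 0.6, the exploration term is
#   0.25 * sqrt(100) / (1 + 9) = 0.25, scaled by the exploration factor,
# and the returned UCB is that bonus plus the exploitation term 0.6.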
def runMCTS(self, state: GameState, trajectory: GameHistory, temp: int = 1) -> typing.Tuple[np.ndarray, float]:
"""
This function performs 'num_MCTS_sims' simulations of MCTS starting from the provided root GameState.
Before the search we only clear statistics stored inside the MinMax tree. In this way we ensure that
reward bounds get refreshed over time/ don't get affected by strong reward scaling in past searches.
This implementation, thus, reuses state state transitions from past searches. This may influence memory usage.
Our estimation of the root-value of the MCTS tree search is based on a sample average of each backed-up
MCTS value. This means that this estimate represents an on-policy estimate V^pi.
Illegal moves are masked before computing the action probabilities.
:param state: GameState Data structure containing the current state of the environment.
:param trajectory: GameHistory Data structure containing the entire episode trajectory of the agent(s).
:param temp: float Visit count exponentiation factor. A value of 0 = Greedy, +infinity = uniformly random.
:return: tuple (pi, v) The move probabilities of MCTS and the estimated root-value of the policy.
"""
# Refresh value bounds in the tree
self.minmax.refresh()
# Initialize the root variables needed for MCTS.
s_0, v_0 = self.initialize_root(state, trajectory)
# Aggregate root state value over MCTS back-propagated values
v_search = sum([self._search(state, trajectory) for _ in range(self.args.num_MCTS_sims - 1)])
v = (v_0 + (v_search if self.single_player else -v_search)) / self.args.num_MCTS_sims
# MCTS Visit count array for each edge 'a' from root node 's_0'.
counts = np.array([self.Nsa[(s_0, a)] if (s_0, a) in self.Nsa else 0 for a in range(self.action_size)])
if temp == 0: # Greedy selection. One hot encode the most visited paths (randomly break ties).
move_probabilities = np.zeros(len(counts))
move_probabilities[np.argmax(counts + np.random.randn(len(counts)) * 1e-8)] = 1
else:
counts = np.power(counts, 1. / temp)
move_probabilities = counts / np.sum(counts)
return move_probabilities, v
def _search(self, state: GameState, trajectory: GameHistory, path: typing.Tuple[int, ...] = tuple()) -> float:
"""
Recursively perform MCTS search inside the actual environments with search-paths guided by the PUCT formula.
Selection chooses an action for expanding/ traversing the edge (s, a) within the tree search.
The exploration_factor for the PUCT formula is computed within this function for efficiency:
exploration_factor = c1 * log(visits_s + c2 + 1) - log(c2)
Setting AlphaMCTS.CANONICAL to true sets exploration_factor just to c1.
If an edge is expanded, we perform a step within the environment (with action a) and observe the state
transition, reward, and infer the new move probabilities, and state value. If an edge is traversed, we simply
look up earlier inferred/ observed values from the class dictionaries.
During backup we update the current value estimate of an edge Q(s, a) using a running average, and we
additionally update the MinMax statistics to obtain reward/ value boundaries for the PUCT formula. Note
that backed-up values get discounted for gamma < 1. For adversarial games, we negate the backed-up value G_k at each backup.
The actual search-path 'path' is kept as a debugging variable; it currently has no practical use. This method
may raise a recursion error if the environment creates cycles, this should be highly improbable for most
environments. If this does occur, the environment can be altered to terminate after n visits to some cycle.
:param state: GameState Numerical prediction of the state by the encoder/ dynamics model.
:param trajectory: GameHistory Data structure containing all observations until the current search-depth.
:param path: tuple of integers representing the tree search-path of the current function call.
:return: float The backed-up discounted/ Monte-Carlo returns (dependent on gamma) of the tree search.
:raises RecursionError: When cycles occur within the search path, the search can get stuck *ad infinitum*.
"""
s = self.game.getHash(state)
### SELECTION
# pick the action with the highest upper confidence bound
exploration_factor = self.args.c1 + np.log(self.Ns[s] + self.args.c2 + 1) - np.log(self.args.c2)
confidence_bounds = np.asarray([self.compute_ucb(s, a, exploration_factor) for a in range(self.action_size)])
a = np.flatnonzero(self.Vs[s])[np.argmax(confidence_bounds[self.Vs[s].astype(bool)])] # Get masked argmax.
# Default leaf node value. The possible future reward defaults to 0; this is overwritten if the edge is non-terminal.
value = 0
if (s, a) not in self.Ssa: ### ROLLOUT for valid moves
next_state, reward = self.game.getNextState(state, a, clone=True)
s_next = self.game.getHash(next_state)
# Transition statistics.
self.Rsa[(s, a)], self.Ssa[(s, a)], self.Ns[s_next] = reward, next_state, 0
# Inference for non-terminal nodes.
if not next_state.done:
# Build network input for inference.
network_input = trajectory.stackObservations(self.neural_net.net_args.observation_length,
state.observation)
prior, value = self.neural_net.predict(network_input)
# Inference statistics. Alternate value perspective due to adversary (model predicts for next player).
self.Ps[s_next], self.Vs[s_next] = prior, self.game.getLegalMoves(next_state)
value = value if self.single_player else -value
elif not self.Ssa[(s, a)].done: ### EXPANSION
trajectory.observations.append(state.observation) # Build up an observation trajectory inside the tree
value = self._search(self.Ssa[(s, a)], trajectory, path + (a,))
trajectory.observations.pop() # Clear tree observation trajectory when backing up
### BACKUP
gk = self.Rsa[(s, a)] + self.args.gamma * value # (Discounted) Value of the current node
if (s, a) in self.Qsa:
self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + gk) / (self.Nsa[(s, a)] + 1)
self.Nsa[(s, a)] += 1
else:
self.Qsa[(s, a)] = gk
self.Nsa[(s, a)] = 1
self.minmax.update(self.Qsa[(s, a)])
self.Ns[s] += 1
return gk if self.single_player else -gk
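# Illustrative usage sketch (not part of the original module). The `game`, `neural_net`
# and `args` objects are assumed to implement the interfaces referenced above
# (getActionSize/getHash/getNextState, predict, num_MCTS_sims/c1/c2/gamma, ...), and
# `initial_state` is assumed to be a GameState for the environment.
#
#   mcts = MCTS(game, neural_net, args)
#   trajectory = GameHistory()
#   pi, v = mcts.runMCTS(initial_state, trajectory, temp=1)
#   action = np.random.choice(len(pi), p=pi)   # sample a move from the MCTS policy
#   mcts.clear_tree()                          # optionally reset statistics between episodes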
|
optimus/engines/cudf/rows.py | ironmussa/Optimus | 1,045 | 12689049 | <filename>optimus/engines/cudf/rows.py
import functools
import operator
from optimus.engines.base.cudf.rows import CUDFBaseRows
import cudf
import pandas as pd
from optimus.engines.base.dataframe.rows import DataFrameBaseRows
from optimus.engines.base.cudf.rows import CUDFBaseRows
from optimus.engines.base.rows import BaseRows
class Rows(DataFrameBaseRows, CUDFBaseRows, BaseRows):
pass |
pythran/tests/user_defined_import/global_init_alias_main.py | davidbrochart/pythran | 1,647 | 12689086 | <reponame>davidbrochart/pythran<filename>pythran/tests/user_defined_import/global_init_alias_main.py
import global_init as gi
XX = [gi.aa(), 3]
#pythran export bb()
def bb():
return XX
|
importer/models.py | juliecentofanti172/juliecentofanti.github.io | 134 | 12689087 | <filename>importer/models.py
"""
See the module-level docstring for implementation details
"""
from django.core.validators import MinValueValidator
from django.db import models
# FIXME: these classes should have names which more accurately represent what they do
class TaskStatusModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
last_started = models.DateTimeField(
help_text="Last time when a worker started processing this job",
null=True,
blank=True,
)
completed = models.DateTimeField(
help_text="Time when the job completed without error", null=True, blank=True
)
failed = models.DateTimeField(
help_text="Time when the job failed due to an error", null=True, blank=True
)
status = models.TextField(
help_text="Status message, if any, from the last worker", blank=True, default=""
)
task_id = models.UUIDField(
help_text="UUID of the last Celery task to process this record",
null=True,
blank=True,
)
class Meta:
abstract = True
class ImportJob(TaskStatusModel):
"""
Represents a request by a user to import item(s) from a remote URL
"""
created_by = models.ForeignKey("auth.User", null=True, on_delete=models.SET_NULL)
project = models.ForeignKey("concordia.Project", on_delete=models.CASCADE)
url = models.URLField(verbose_name="Source URL for the entire job")
def __str__(self):
return "ImportJob(created_by=%s, project=%s, url=%s)" % (
self.created_by.username,
self.project.title,
self.url,
)
class ImportItem(TaskStatusModel):
"""
Record of the task status for each Item being imported
"""
job = models.ForeignKey(ImportJob, on_delete=models.CASCADE, related_name="items")
url = models.URLField()
item = models.ForeignKey("concordia.Item", on_delete=models.CASCADE)
class Meta:
unique_together = (("job", "item"),)
def __str__(self):
return "ImportItem(job=%s, url=%s)" % (self.job, self.url)
class ImportItemAsset(TaskStatusModel):
"""
Record of the task status for each Asset being imported
"""
import_item = models.ForeignKey(
ImportItem, on_delete=models.CASCADE, related_name="assets"
)
url = models.URLField()
sequence_number = models.PositiveIntegerField(validators=[MinValueValidator(1)])
asset = models.ForeignKey("concordia.Asset", on_delete=models.CASCADE)
class Meta:
unique_together = (("import_item", "sequence_number"), ("import_item", "asset"))
def __str__(self):
return "ImportItemAsset(import_item=%s, url=%s)" % (self.import_item, self.url)
|
lightbus/serializers/by_field.py | gcollard/lightbus | 178 | 12689119 | """ Serializers suitable for transports which support multiple fields per message
These serializers handle moving data to/from a dictionary
format. The format looks like this::
# Message metadata first. Each value is implicitly a utf8 string
id: 'ZOCTLh1CEeimW3gxwcOTbg=='
api_name: 'my_company.auth'
procedure_name: 'check_password'
return_path: 'redis+key://my_company.auth.check_password:result:ZOCTLh1CEeimW3gxwcOTbg=='
# kwargs follow, each encoded with the provided encoder (in this case JSON)
kw:username: '"admin"'
kw:password: '"<PASSWORD>"'
"""
from typing import TYPE_CHECKING
from lightbus.serializers.base import (
decode_bytes,
sanity_check_metadata,
MessageSerializer,
MessageDeserializer,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus import Message
class ByFieldMessageSerializer(MessageSerializer):
def __call__(self, message: "Message") -> dict:
"""Takes a message object and returns a serialised dictionary representation
See the module-level docs (above) for further details
"""
serialized = message.get_metadata()
for k, v in message.get_kwargs().items():
serialized[":{}".format(k)] = self.encoder(v)
return serialized
class ByFieldMessageDeserializer(MessageDeserializer):
def __call__(self, serialized: dict, *, native_id=None, **extra):
"""Takes a dictionary of serialised fields and returns a Message object
See the module-level docs (above) for further details
"""
metadata = {}
kwargs = {}
for k, v in serialized.items():
k = decode_bytes(k)
v = decode_bytes(v)
if not k:
continue
# kwarg fields start with a ':', everything else is metadata
if k[0] == ":":
# kwarg values need decoding
kwargs[k[1:]] = self.decoder(v)
else:
# metadata args are implicitly strings, so we don't need to decode them
metadata[k] = v
sanity_check_metadata(self.message_class, metadata)
if "native_id" in metadata:
native_id = metadata.pop("native_id")
return self.message_class.from_dict(
metadata=metadata, kwargs=kwargs, native_id=native_id, **extra
)
|
parsl/tests/test_python_apps/test_depfail_propagation.py | cylondata/parsl | 323 | 12689153 | <filename>parsl/tests/test_python_apps/test_depfail_propagation.py
from parsl import python_app
from parsl.dataflow.error import DependencyError
@python_app
def fails():
raise ValueError("Deliberate failure")
@python_app
def depends(parent):
return 1
def test_depfail_once():
"""Test the simplest dependency failure case"""
f1 = fails()
f2 = depends(f1)
assert isinstance(f1.exception(), Exception)
assert not isinstance(f1.exception(), DependencyError)
assert isinstance(f2.exception(), DependencyError)
def test_depfail_chain():
"""Test that dependency failures chain"""
f1 = fails()
f2 = depends(f1)
f3 = depends(f2)
f4 = depends(f3)
assert isinstance(f1.exception(), Exception)
assert not isinstance(f1.exception(), DependencyError)
assert isinstance(f2.exception(), DependencyError)
assert isinstance(f3.exception(), DependencyError)
assert isinstance(f4.exception(), DependencyError)
def test_depfail_branches():
"""Test that dependency failures propagate in the
presence of multiple downstream tasks."""
f1 = fails()
f2 = depends(f1)
f3 = depends(f1)
assert isinstance(f1.exception(), Exception)
assert not isinstance(f1.exception(), DependencyError)
assert isinstance(f2.exception(), DependencyError)
assert isinstance(f3.exception(), DependencyError)
|
env/lib/python3.6/site-packages/py4j/tests/java_tls_test.py | jenngeorge/kafka-practice | 111 | 12689156 | """
Created on Feb 2, 2016
@author: <NAME>
"""
from __future__ import unicode_literals, absolute_import
from multiprocessing import Process
import subprocess
import unittest
import ssl
import os
import sys
from py4j.java_gateway import (
JavaGateway, CallbackServerParameters,
set_default_callback_accept_timeout, GatewayParameters)
from py4j.tests.java_gateway_test import (
PY4J_JAVA_PATH, safe_shutdown, sleep)
set_default_callback_accept_timeout(0.125)
def start_example_tls_server():
subprocess.call([
"java", "-cp", PY4J_JAVA_PATH,
"py4j.examples.ExampleSSLApplication"])
def start_example_tls_process():
p = Process(target=start_example_tls_server)
p.start()
sleep()
return p
class Adder(object):
def doOperation(self, i, j):
return i + j
class Java:
implements = ["py4j.examples.Operator"]
if sys.version_info >= (2, 7):
# ssl.SSLContext introduced in Python 2.7
class TestIntegration(unittest.TestCase):
"""Tests cases borrowed from other files, but executed over a
TLS connection.
"""
def setUp(self):
key_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"selfsigned.pem")
client_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_ssl_context.verify_mode = ssl.CERT_REQUIRED
client_ssl_context.check_hostname = True
client_ssl_context.load_verify_locations(cafile=key_file)
server_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_ssl_context.load_cert_chain(key_file, password='password')
callback_server_parameters = CallbackServerParameters(
ssl_context=server_ssl_context)
# address must match cert, because we're checking hostnames
gateway_parameters = GatewayParameters(
address='localhost',
ssl_context=client_ssl_context)
self.p = start_example_tls_process()
self.gateway = JavaGateway(
gateway_parameters=gateway_parameters,
callback_server_parameters=callback_server_parameters)
# It seems SecureServerSocket may need a little more time to
# initialize on some platforms/slow machines.
sleep(0.500)
def tearDown(self):
safe_shutdown(self)
self.p.join()
sleep()
def testUnicode(self):
sleep()
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append("\r\n\tHello\r\n\t")
self.assertEqual("\r\n\tHello\r\n\t", sb.toString())
def testMethodConstructor(self):
sleep()
adder = Adder()
oe1 = self.gateway.jvm.py4j.examples.OperatorExample()
# Test method
oe1.randomBinaryOperator(adder)
# Test constructor
oe2 = self.gateway.jvm.py4j.examples.OperatorExample(adder)
self.assertTrue(oe2 is not None)
if __name__ == "__main__":
unittest.main()
|
HunterSense/model/request_log.py | tt9133github/hunter | 322 | 12689165 | #!/ usr/bin/env
# coding=utf-8
#
# Copyright 2019 ztosec & https://www.zto.com/
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
author: b5mali4
"""
import time
import datetime
from peewee import *
from .base_model import BaseModel
from .base_model import BaseModelService
from common.sqllite_util import SqliteManage
class RequestLog(BaseModel):
"""
To Create Table:
>>> if __name__ == "__main__":
>>> RequestLog.create_table()
ip : target IP address
port: target port
protocol: protocol
time: time of the request
plugin: plugin name
recv_data: raw request data
time_stamp: timestamp
"""
ip = TextField(default='')
port = TextField(default='')
protocol = TextField(null=True)
time_str = DateTimeField(formats='%Y-%m-%d %H:%M:%S', default=datetime.datetime.now)
plugin = TextField(default='')
recv_data = TextField(default='')
time_stamp = FloatField(null=True, default=time.mktime(datetime.datetime.now().timetuple()))
class Meta:
database = SqliteManage.get_database()
class RequestLogService:
@staticmethod
def get_fields_by_where(**kwargs):
"""
To use:
>>> request_logs = RequestLogService.get_fields_by_where(fields=(RequestLog.port, RequestLog.plugin), where=(RequestLog.protocol == 'udp'))
>>> print(request_logs)
:param kwargs:
:return:
"""
return BaseModelService.get_fields_by_where(RequestLog, **kwargs)
@staticmethod
def remove(**kwargs):
"""
Database delete operation
To use:
>>> RequestLogService.remove(where=(RequestLog.id == 26))
:param kwargs:
:return:
"""
return BaseModelService.remove(RequestLog, **kwargs)
@staticmethod
def count(**kwargs):
"""
Count the number of matching records
To use:
>>> RequestLogService.count(where=(RequestLog.id == 26))
:param kwargs:
:return:
"""
return BaseModelService.count(RequestLog, **kwargs)
@staticmethod
def update(**kwargs):
"""
Update operation
To use:
>>> RequestLogService.update(fields=({RequestLog.port: "7998" }))
:param kwargs:
:return:
"""
return BaseModelService.update(RequestLog, **kwargs)
@staticmethod
def save(**kwargs):
"""
Save operation
To use:
>>> RequestLogService.save(ip="127.0.0.1")
:param kwargs:
:return:
"""
return BaseModelService.save(RequestLog, **kwargs)
|
nextgen/bcbio/pipeline/shared.py | bgruening/bcbb | 339 | 12689185 | <gh_stars>100-1000
"""Pipeline functionality shared amongst multiple analysis types.
"""
import os
import collections
from contextlib import closing
import pysam
from bcbio import broad
from bcbio.pipeline.alignment import get_genome_ref
from bcbio.utils import file_exists, safe_makedir, save_diskspace
from bcbio.distributed.transaction import file_transaction
# ## Split/Combine helpers
def combine_bam(in_files, out_file, config):
"""Parallel target to combine multiple BAM files.
"""
runner = broad.runner_from_config(config)
runner.run_fn("picard_merge", in_files, out_file)
for in_file in in_files:
save_diskspace(in_file, "Merged into {0}".format(out_file), config)
runner.run_fn("picard_index", out_file)
return out_file
def process_bam_by_chromosome(output_ext, file_key, default_targets=None, dir_ext_fn=None):
"""Provide targets to process a BAM file by individual chromosome regions.
output_ext: extension to supply to output files
file_key: the key of the BAM file in the input data map
default_targets: a list of extra chromosome targets to process, beyond those specified
in the BAM file. Useful for retrieval of non-mapped reads.
dir_ext_fn: A function to retrieve a directory naming extension from input data map.
"""
if default_targets is None:
default_targets = []
def _do_work(data):
bam_file = data[file_key]
out_dir = os.path.dirname(bam_file)
if dir_ext_fn:
out_dir = os.path.join(out_dir, dir_ext_fn(data))
out_file = os.path.join(out_dir, "{base}{ext}".format(
base=os.path.splitext(os.path.basename(bam_file))[0],
ext=output_ext))
part_info = []
if not file_exists(out_file):
work_dir = safe_makedir(
"{base}-split".format(base=os.path.splitext(out_file)[0]))
with closing(pysam.Samfile(bam_file, "rb")) as work_bam:
for chr_ref in list(work_bam.references) + default_targets:
chr_out = os.path.join(work_dir,
"{base}-{ref}{ext}".format(
base=os.path.splitext(os.path.basename(bam_file))[0],
ref=chr_ref, ext=output_ext))
part_info.append((chr_ref, chr_out))
return out_file, part_info
return _do_work
def write_nochr_reads(in_file, out_file):
"""Write a BAM file of reads that are not on a reference chromosome.
This is useful for maintaining non-mapped reads in parallel processes
that split processing by chromosome.
"""
if not file_exists(out_file):
with closing(pysam.Samfile(in_file, "rb")) as in_bam:
with file_transaction(out_file) as tx_out_file:
with closing(pysam.Samfile(tx_out_file, "wb", template=in_bam)) as out_bam:
for read in in_bam:
if read.tid < 0:
out_bam.write(read)
return out_file
def subset_bam_by_region(in_file, region, out_file_base = None):
"""Subset BAM files based on specified chromosome region.
"""
if out_file_base is not None:
base, ext = os.path.splitext(out_file_base)
else:
base, ext = os.path.splitext(in_file)
out_file = "%s-subset%s%s" % (base, region, ext)
if not file_exists(out_file):
with closing(pysam.Samfile(in_file, "rb")) as in_bam:
target_tid = in_bam.gettid(region)
assert region is not None, \
"Did not find reference region %s in %s" % \
(region, in_file)
with file_transaction(out_file) as tx_out_file:
with closing(pysam.Samfile(tx_out_file, "wb", template=in_bam)) as out_bam:
for read in in_bam:
if read.tid == target_tid:
out_bam.write(read)
return out_file
def subset_variant_regions(variant_regions, region, out_file):
"""Return BED file subset by a specified chromosome region.
variant_regions is a BED file, region is a chromosome name.
"""
if region is None:
return variant_regions
elif variant_regions is None:
return region
elif region.find(":") > 0:
raise ValueError("Partial chromosome regions not supported")
else:
# create an ordered subset file for processing
subset_file = "{0}-regions.bed".format(os.path.splitext(out_file)[0])
items = []
with open(variant_regions) as in_handle:
for line in in_handle:
if line.startswith(region) and line.split("\t")[0] == region:
start = int(line.split("\t")[1])
items.append((start, line))
if len(items) > 0:
if not os.path.exists(subset_file):
with open(subset_file, "w") as out_handle:
items.sort()
for _, line in items:
out_handle.write(line)
return subset_file
else:
return region
# ## Retrieving file information from configuration variables
def configured_ref_file(name, config, sam_ref):
"""Full path to a reference file specified in the configuration.
Resolves non-absolute paths relative to the base genome reference directory.
"""
ref_file = config["algorithm"].get(name, None)
if ref_file:
if not os.path.isabs(ref_file):
base_dir = os.path.dirname(os.path.dirname(sam_ref))
ref_file = os.path.join(base_dir, ref_file)
return ref_file
def configured_vrn_files(config, sam_ref):
"""Full path to all configured files for variation assessment.
"""
names = ["dbsnp", "train_hapmap", "train_1000g_omni", "train_indels"]
VrnFiles = collections.namedtuple("VrnFiles", names)
return apply(VrnFiles, [configured_ref_file(n, config, sam_ref) for n in names])
def ref_genome_info(info, config, dirs):
"""Retrieve reference genome information from configuration variables.
"""
genome_build = info.get("genome_build", None)
(_, sam_ref) = get_genome_ref(genome_build, config["algorithm"]["aligner"],
dirs["galaxy"])
return genome_build, sam_ref
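# Illustrative call pattern (hypothetical `info`, `config` and `dirs` inputs; mirrors how the
# helpers above are combined elsewhere in the pipeline):
#
#   genome_build, sam_ref = ref_genome_info(info, config, dirs)
#   dbsnp, hapmap, omni, indels = configured_vrn_files(config, sam_ref)
#   region_bed = subset_variant_regions(config["algorithm"].get("variant_regions"),
#                                       "chr20", "out/chr20-calls.vcf")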
|
desktop/core/ext-py/urllib2_kerberos-0.1.6/setup.py | kokosing/hue | 5,079 | 12689186 | <reponame>kokosing/hue<filename>desktop/core/ext-py/urllib2_kerberos-0.1.6/setup.py<gh_stars>1000+
# Copyright 2008 Lime Nest LLC
# Copyright 2008 Lime Spot LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import hgvers
setup(
name = "urllib2_kerberos",
version = hgvers.version,
py_modules = [ 'urllib2_kerberos' ],
# install_requires = ['kerberos'],
author = "<NAME>",
author_email = "<EMAIL>",
description = "Kerberos over HTTP Negotiate/SPNEGO support for urllib2",
license = "Apache 2.0",
url = "http://limedav.com/hg/urllib2_kerberos/",
keywords = "urllib2 kerberos http negotiate spnego",
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory'
]
)
|
python_toolbox/wx_tools/widgets/cute_bitmap_button.py | hboshnak/python_toolbox | 119 | 12689197 | # Copyright 2009-2011 <NAME>.
# This program is distributed under the LGPL2.1 license.
import wx
from python_toolbox.wx_tools.widgets.cute_button import CuteButton
class CuteBitmapButton(wx.BitmapButton, CuteButton):
def __init__(self, parent, id=-1, bitmap=wx.NullBitmap,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.BU_AUTODRAW, validator=wx.DefaultValidator,
name=wx.ButtonNameStr, bitmap_disabled=None, tool_tip=None,
help_text=None):
wx.BitmapButton.__init__(self, parent=parent, id=id, bitmap=bitmap,
pos=pos, size=size, style=style,
validator=validator, name=name)
if bitmap_disabled is not None:
self.SetBitmapDisabled(bitmap_disabled)
self.set_tool_tip_and_help_text(tool_tip, help_text)
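# Illustrative usage sketch (assumes an existing wx.App, a parent `frame` and a wx.Bitmap `bmp`):
#
#   button = CuteBitmapButton(frame, bitmap=bmp, tool_tip='Save',
#                             help_text='Save the current document')
#   button.Bind(wx.EVT_BUTTON, on_save)   # `on_save` is a hypothetical event handler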
|
scripts/monitoring/cron-haproxy-close-wait.py | fahlmant/openshift-tools | 164 | 12689230 | #!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
# Disabling invalid-name because pylint doesn't like the naming convention we have.
# pylint: disable=invalid-name
''' Tool to detect haproxy processes that have been running longer than
the most recent haproxy process and whose connections are all in the CLOSE_WAIT state,
and stop them '''
import argparse
import psutil
import time
# Reason: disable pylint import-error because our libs aren't loaded on jenkins.
# Status: temporary until we start testing in a container where our stuff is installed.
# pylint: disable=import-error
from openshift_tools.monitoring.metric_sender import MetricSender
# This script is a mitigation for this bug:
# https://bugzilla.redhat.com/show_bug.cgi?id=1364870
ZABBIX_KEY = "openshift.haproxy.close-wait"
class HAProxy(object):
''' wrapper for finding and stopping stuck haproxy processes '''
def __init__(self):
''' constructor '''
self.args = None
self.current_time = None
def dprint(self, msg):
''' wrap printing debug messages '''
if self.args.debug:
print msg
def parse_args(self):
''' get user args '''
parser = argparse.ArgumentParser(description='haproxy killer')
parser.add_argument('--debug', default=False, action='store_true')
self.args = parser.parse_args()
def get_etimes(self, proc):
''' Return elapsed time for proc in seconds '''
return int(self.current_time - proc.create_time())
def get_all_haproxy_procs(self):
''' build dict of elapsed times mapped to PIDs for all
haproxy processes running '''
all_procs = {}
for proc in psutil.process_iter():
try:
if proc.name() == 'haproxy':
elapsed = self.get_etimes(proc)
all_procs[elapsed] = proc.pid
except psutil.NoSuchProcess:
pass
return all_procs
def kill(self):
''' class entrypoint '''
self.parse_args()
self.current_time = time.time()
haproxy_procs_etimes = self.get_all_haproxy_procs()
# identify most recent haproxy process
# and remove it from list of haproxy processes
try:
youngest_etimes = min(haproxy_procs_etimes.keys())
youngest_pid = haproxy_procs_etimes[youngest_etimes]
self.dprint("Youngest haproxy PID: {}".format(youngest_pid))
haproxy_procs_etimes.pop(youngest_etimes)
except ValueError:
pass
# find processes whose connections are all in CLOSE_WAIT or FIN_WAIT2 state
kill_list = []
for proc in haproxy_procs_etimes.values():
try:
only_close_wait = True
process = psutil.Process(proc)
for conn in process.connections():
if conn.status != 'CLOSE_WAIT' and conn.status != 'FIN_WAIT2':
only_close_wait = False
break
if only_close_wait:
self.dprint("PID: {} marked for removal".format(proc))
kill_list.append(proc)
except psutil.NoSuchProcess:
pass
# stop processes on the kill_list
kill_count = 0
for proc in kill_list:
try:
process = psutil.Process(proc)
self.dprint("Stopping PID: {}".format(process.pid))
process.kill()
kill_count += 1
except psutil.NoSuchProcess:
pass
print "Stopped {} haproxy processes".format(kill_count)
ms = MetricSender()
ms.add_metric({ZABBIX_KEY : kill_count})
ms.send_metrics()
if __name__ == '__main__':
hap = HAProxy()
hap.kill()
|
test/com/facebook/buck/testutil/endtoend/testdata/fix/fix.py | Unknoob/buck | 8,027 | 12689249 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--build-details", required=True)
parser.add_argument("--exit-code", type=int, default=0)
parser.add_argument("--wait", action="store_true")
args = parser.parse_args()
with open(args.build_details, "r") as fin:
js = json.load(fin)
if args.wait:
user_data = sys.stdin.read().strip()
print("user entered {}".format(user_data), file=sys.stderr)
print("build_id: {}".format(js["build_id"]), file=sys.stderr)
print("command: {}".format(js["command"]), file=sys.stderr)
print("exit_code: {}".format(js["exit_code"]), file=sys.stderr)
print(
"jasabi_fix: {}".format(js["buck_provided_scripts"]["jasabi_fix"][0]),
file=sys.stderr,
)
print("manually_invoked: {}".format(js["manually_invoked"]), file=sys.stderr)
sys.exit(args.exit_code)
|
service_checker.py | nettargets/bern | 143 | 12689264 | from datetime import datetime
import math
import numpy as np
import random
from utils \
import is_good, is_get_good, send_mail, test_bern_get, test_bern_post, query
FROM_GMAIL_ADDR = 'YOUR_GMAIL_ADDR'
FROM_GMAIL_ACCOUNT_PASSWORD = '<PASSWORD>'
TO_EMAIL_ADDR = 'TO_EMAIL_ADDR'
def check_bern(from_gmail, to_email, from_google_account, from_google_password):
results = list()
# 0. raw text
results.append(is_good())
# 1. pmid, json
results.append(is_get_good(29446767, 'json', 3, 10))
# 2. pmid, pubtator
results.append(is_get_good(29446767, 'pubtator', 3, 10))
# 3. mutiple pmid
results.append(is_get_good([29446767, 25681199], 'json', 4, 32))
acceptables = ['success', 'tmtool error']
problems = list()
for ridx, r in enumerate(results):
if r in acceptables:
continue
problems.append('{}: {}'.format(ridx, r))
if len(problems) == 0:
print(datetime.now(), 'No problem')
else:
problems_total = ', '.join(problems)
print(datetime.now(), 'Found', problems_total)
send_mail(from_gmail, to_email,
'[BERN] Error(s) {}'.format(problems_total),
'\n'.join(problems),
from_google_account, from_google_password)
def benchmark(tries, batch_size=None, log_interval=100):
mutation_times = list()
ner_times = list()
normalization_times = list()
total_times = list()
pmids = random.sample(range(0, 31113013), tries)
print('pmids[:10]', pmids[:min(10, tries)])
if batch_size is not None:
batch_pmids = list()
num_batches = math.ceil(len(pmids) / batch_size)
for i in range(num_batches):
# last
if i == num_batches - 1:
batch_pmids.append(pmids[i * batch_size:])
else:
batch_pmids.append(pmids[i * batch_size:(i+1) * batch_size])
pmids = batch_pmids
num_na = 0
num_not_list = 0
num_not_dict = 0
ooi_list = list()
num_error_dict = dict()
with open('benchmark.tsv', 'w', encoding='utf-8') as f:
for pidx, pmid in enumerate(pmids):
res_dict_list = query(pmid)
if type(res_dict_list) is not list:
print('not list', pmid, sep='\t')
num_not_list += 1
continue
if type(res_dict_list[0]) is not dict:
print('not dict', pmid, sep='\t')
num_not_dict += 1
continue
if 'text' in res_dict_list[0]:
if 'out of index range' in res_dict_list[0]['text']:
ooi_list.append(pmid)
print('out of index range', pmid, sep='\t')
elif 'BioC.key' in res_dict_list[0]['text']:
num_na += 1
# print(res_dict_list[0]['text'], pmid, sep='\t')
elif 'error: ' in res_dict_list[0]['text'] \
and 'elapsed_time' not in res_dict_list[0]:
if res_dict_list[0]['text'] in num_error_dict:
num_error_dict[res_dict_list[0]['text']] += 1
else:
num_error_dict[res_dict_list[0]['text']] = 1
if 'elapsed_time' not in res_dict_list[0]:
# print('no elapsed_time', pmid, sep='\t')
continue
elapsed_time_dict = res_dict_list[0]['elapsed_time']
mutation_times.append(elapsed_time_dict['tmtool'])
ner_times.append(elapsed_time_dict['ner'])
normalization_times.append(elapsed_time_dict['normalization'])
total_times.append(elapsed_time_dict['total'])
valid_results = len(mutation_times)
if pidx > 0 and (pidx + 1) % log_interval == 0:
print(datetime.now(), '{}/{}'.format(pidx + 1, tries),
'#valid_results', valid_results, '#N/A', num_na,
'#not_list', num_not_list, '#not_dict', num_not_dict,
'#ooi', len(ooi_list), ooi_list, '#err', num_error_dict)
if valid_results > 0 and valid_results % log_interval == 0:
print(datetime.now(), '#valid_results', valid_results)
mutation_res = \
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(mutation_times,
batch_size=batch_size)])
ner_res = \
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(ner_times,
batch_size=batch_size)])
normalization_res = \
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(normalization_times,
batch_size=batch_size)])
total_res = \
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(total_times,
batch_size=batch_size)])
print(valid_results, 'mutation', mutation_res, sep='\t')
print(valid_results, 'ner', ner_res, sep='\t')
print(valid_results, 'normalization', normalization_res,
sep='\t')
print(valid_results, 'total', total_res, sep='\t')
f.write('{}\t{}\t{}\n'.format(valid_results, 'mutation NER',
mutation_res))
f.write('{}\t{}\t{}\n'.format(valid_results, 'NER',
ner_res))
f.write('{}\t{}\t{}\n'.format(valid_results, 'normalization',
normalization_res))
f.write('{}\t{}\t{}\n'.format(valid_results, 'total',
total_res))
f.flush()
print('#valid_results', len(mutation_times))
print('mutation',
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(mutation_times,
batch_size=batch_size)]), sep='\t')
print('ner',
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(ner_times,
batch_size=batch_size)]), sep='\t')
print('normalization',
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(normalization_times,
batch_size=batch_size)]), sep='\t')
print('total',
'\t'.join(['{:.3f}'.format(v)
for v in get_stats(total_times,
batch_size=batch_size)]), sep='\t')
def get_stats(lst, batch_size=None):
if not lst:
return None
if batch_size is None:
return sum(lst) / len(lst), np.std(lst), min(lst), max(lst)
else:
return (sum(lst) / len(lst)) / batch_size, \
np.std(lst), min(lst) / batch_size, max(lst) / batch_size
def stress_test(num_threads, wait_seconds, num_try):
test_bern_get(num_threads, wait_seconds, num_try)
test_bern_post('CLAPO syndrome: identification of somatic activating '
'PIK3CA mutations and delineation of the natural history '
'and phenotype. Purpose CLAPO syndrome is a rare vascular '
'disorder characterized by capillary malformation of the '
'lower lip, lymphatic malformation predominant on the face'
' and neck, asymmetry and partial/generalized overgrowth. '
'Here we tested the hypothesis that, although the genetic '
'cause is not known, the tissue distribution of the '
'clinical manifestations in CLAPO seems to follow a '
'pattern of somatic mosaicism. Methods We clinically '
'evaluated a cohort of 13 patients with CLAPO and screened'
' 20 DNA blood/tissue samples from 9 patients using '
'high-throughput, deep sequencing. Results We identified '
'five activating mutations in the PIK3CA gene in affected '
'tissues from 6 of the 9 patients studied; one of the '
'variants (NM_006218.2:c.248T>C; p.Phe83Ser) has not been '
'previously described in developmental disorders. '
'Conclusion We describe for the first time the presence '
'of somatic activating PIK3CA mutations in patients with '
'CLAPO. We also report an update of the phenotype and '
'natural history of the syndrome.',
num_threads, wait_seconds, num_try)
if __name__ == '__main__':
check_bern(FROM_GMAIL_ADDR, TO_EMAIL_ADDR,
FROM_GMAIL_ADDR, FROM_GMAIL_ACCOUNT_PASSWORD)
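# Illustrative invocations (commented out because benchmark/stress_test hit the live BERN
# service and send real requests):
#
#   benchmark(tries=1000, batch_size=10, log_interval=100)
#   stress_test(num_threads=4, wait_seconds=1, num_try=25)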
|
nni/experiment/config/utils/internal.py | dutxubo/nni | 2,305 | 12689266 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Utility functions for experiment config classes, internal part.
If you are implementing a config class for a training service, it's unlikely you will need these.
"""
import dataclasses
import importlib
import json
import os.path
from pathlib import Path
import socket
import typeguard
import nni.runtime.config
from .public import is_missing
__all__ = [
'get_base_path', 'set_base_path', 'unset_base_path', 'resolve_path',
'case_insensitive', 'camel_case',
'is_instance', 'validate_type', 'is_path_like',
'guess_config_type', 'guess_list_config_type',
'training_service_config_factory', 'load_training_service_config',
'get_ipv4_address'
]
## handle relative path ##
_current_base_path = None
def get_base_path():
if _current_base_path is None:
return Path()
return _current_base_path
def set_base_path(path):
global _current_base_path
assert _current_base_path is None
_current_base_path = path
def unset_base_path():
global _current_base_path
_current_base_path = None
def resolve_path(path, base_path):
if path is None:
return None
# Path.resolve() does not work on Windows when file not exist, so use os.path instead
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.join(base_path, path)
return str(os.path.realpath(path)) # it should be already str, but official doc does not specify it's type
## field name case convertion ##
def case_insensitive(key):
return key.lower().replace('_', '')
def camel_case(key):
words = key.strip('_').split('_')
return words[0] + ''.join(word.title() for word in words[1:])
## type hint utils ##
def is_instance(value, type_hint):
try:
typeguard.check_type('_', value, type_hint)
except TypeError:
return False
return True
def validate_type(config):
class_name = type(config).__name__
for field in dataclasses.fields(config):
value = getattr(config, field.name)
# check existence
if is_missing(value):
raise ValueError(f'{class_name}: {field.name} is not set')
if not is_instance(value, field.type):
raise ValueError(f'{class_name}: type of {field.name} ({repr(value)}) is not {field.type}')
def is_path_like(type_hint):
# only `PathLike` and `Any` accepts `Path`; check `int` to make sure it's not `Any`
return is_instance(Path(), type_hint) and not is_instance(1, type_hint)
## type inference ##
def guess_config_type(obj, type_hint):
ret = guess_list_config_type([obj], type_hint, _hint_list_item=True)
return ret[0] if ret else None
def guess_list_config_type(objs, type_hint, _hint_list_item=False):
# avoid circular import
from ..base import ConfigBase
from ..training_service import TrainingServiceConfig
# because __init__ of subclasses might be complex, we first create empty objects to determine type
candidate_classes = []
for cls in _all_subclasses(ConfigBase):
if issubclass(cls, TrainingServiceConfig): # training service configs are specially handled
continue
empty_list = [cls.__new__(cls)]
if _hint_list_item:
good_type = is_instance(empty_list[0], type_hint)
else:
good_type = is_instance(empty_list, type_hint)
if good_type:
candidate_classes.append(cls)
if not candidate_classes: # it does not accept config type
return None
if len(candidate_classes) == 1: # the type is confirmed, raise error if cannot convert to this type
return [candidate_classes[0](**obj) for obj in objs]
# multiple candidates available, call __init__ to further verify
candidate_configs = []
for cls in candidate_classes:
try:
configs = [cls(**obj) for obj in objs]
except Exception:
continue
candidate_configs.append(configs)
if not candidate_configs:
return None
if len(candidate_configs) == 1:
return candidate_configs[0]
# still have multiple candidates, choose the common base class
for base in candidate_configs:
base_class = type(base[0])
is_base = all(isinstance(configs[0], base_class) for configs in candidate_configs)
if is_base:
return base
return None # cannot detect the type, give up
def _all_subclasses(cls):
subclasses = set(cls.__subclasses__())
return subclasses.union(*[_all_subclasses(subclass) for subclass in subclasses])
def training_service_config_factory(platform):
cls = _get_ts_config_class(platform)
if cls is None:
raise ValueError(f'Bad training service platform: {platform}')
return cls()
def load_training_service_config(config):
if isinstance(config, dict) and 'platform' in config:
cls = _get_ts_config_class(config['platform'])
if cls is not None:
return cls(**config)
return config # not valid json, don't touch
def _get_ts_config_class(platform):
from ..training_service import TrainingServiceConfig # avoid circular import
# import all custom config classes so they can be found in TrainingServiceConfig.__subclasses__()
custom_ts_config_path = nni.runtime.config.get_config_file('training_services.json')
with custom_ts_config_path.open() as config_file:
custom_ts_config = json.load(config_file)
for custom_ts_pkg in custom_ts_config.keys():
pkg = importlib.import_module(custom_ts_pkg)
_config_class = pkg.nni_training_service_info.config_class
for cls in TrainingServiceConfig.__subclasses__():
if cls.platform == platform:
return cls
return None
## misc ##
def get_ipv4_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('192.0.2.0', 80))
addr = s.getsockname()[0]
s.close()
return addr
|
examples/inference/python/export/fairseq/native_fs_transformer_export.py | hexisyztem/lightseq | 106 | 12689283 | <filename>examples/inference/python/export/fairseq/native_fs_transformer_export.py<gh_stars>100-1000
"""
Export native Fairseq Transformer models to protobuf/hdf5 format.
Refer to the `examples/training/fairseq` directory for more training details.
"""
from collections import OrderedDict
import torch
from export.proto.transformer_pb2 import Transformer
from lightseq.training.ops.pytorch.export import (
gather_token_embedding,
fill_pb_layer,
export_ls_config,
)
from lightseq.training.ops.pytorch.util import get_pos_embedding
import lightseq.inference as lsi
from export.util import parse_args, save_model
enc_layer_mapping_dict = OrderedDict(
{
"multihead_norm_scale": "self_attn_layer_norm weight",
"multihead_norm_bias": "self_attn_layer_norm bias",
"multihead_project_kernel_qkv": "self_attn q_proj weight&&self_attn k_proj weight&&self_attn v_proj weight&&expression_.transpose(0, 1)",
"multihead_project_bias_qkv": "self_attn q_proj bias&&self_attn k_proj bias&&self_attn v_proj bias",
"multihead_project_kernel_output": "self_attn out_proj weight&&expression_.transpose(0, 1)",
"multihead_project_bias_output": "self_attn out_proj bias",
"ffn_norm_scale": "final_layer_norm weight",
"ffn_norm_bias": "final_layer_norm bias",
"ffn_first_kernel": "fc1 weight&&expression_.transpose(0, 1)",
"ffn_first_bias": "fc1 bias",
"ffn_second_kernel": "fc2 weight&&expression_.transpose(0, 1)",
"ffn_second_bias": "fc2 bias",
}
)
dec_layer_mapping_dict = OrderedDict(
{
"self_norm_scale": "self_attn_layer_norm weight",
"self_norm_bias": "self_attn_layer_norm bias",
"self_project_kernel_qkv": "self_attn q_proj weight&&self_attn k_proj weight&&self_attn v_proj weight&&expression_.transpose(0, 1)",
"self_project_bias_qkv": "self_attn q_proj bias&&self_attn k_proj bias&&self_attn v_proj bias",
"self_project_kernel_output": "self_attn out_proj weight&&expression_.transpose(0, 1)",
"self_project_bias_output": "self_attn out_proj bias",
"encdec_norm_scale": "encoder_attn_layer_norm weight",
"encdec_norm_bias": "encoder_attn_layer_norm bias",
"encdec_project_kernel_q": "encoder_attn q_proj weight&&expression_.transpose(0, 1)",
"encdec_project_bias_q": "encoder_attn q_proj bias",
"encdec_project_kernel_output": "encoder_attn out_proj weight&&expression_.transpose(0, 1)",
"encdec_project_bias_output": "encoder_attn out_proj bias",
"ffn_norm_scale": "final_layer_norm weight",
"ffn_norm_bias": "final_layer_norm bias",
"ffn_first_kernel": "fc1 weight&&expression_.transpose(0, 1)",
"ffn_first_bias": "fc1 bias",
"ffn_second_kernel": "fc2 weight&&expression_.transpose(0, 1)",
"ffn_second_bias": "fc2 bias",
}
)
src_emb_mapping_dict = OrderedDict(
{
"norm_scale": "layer_norm weight",
"norm_bias": "layer_norm bias",
}
)
trg_emb_mapping_dict = OrderedDict(
{
"norm_scale": "layer_norm weight",
"norm_bias": "layer_norm bias",
}
)
def _get_encode_output_mapping_dict(dec_layer_num):
encode_output_kernel_pattern = [
"encoder_attn {0} k_proj weight&&encoder_attn {0} v_proj weight".format(ele)
for ele in range(dec_layer_num)
]
encode_output_bias_pattern = [
"encoder_attn {0} k_proj bias&&encoder_attn {0} v_proj bias".format(ele)
for ele in range(dec_layer_num)
]
return {
"encode_output_project_kernel_kv": "&&".join(
encode_output_kernel_pattern + ["expression_.transpose(0, 1)"]
),
"encode_output_project_bias_kv": "&&".join(encode_output_bias_pattern),
}
def export_native_fs_transformer(
model_path,
pb_path,
hdf5_path,
hdf5,
max_step=300,
bos_id=2,
eos_id=2,
pad_id=1,
):
transformer = Transformer()
# load var names
reloaded = torch.load(model_path, "cpu")
args = reloaded["args"]
model_dict = reloaded["model"]
trg_emb_mapping_dict["shared_bias"] = (
"expression_np.zeros(%d)"
% model_dict["decoder.embed_tokens.weight"].numpy().shape[0]
)
encoder_state_dict = {}
decoder_state_dict = {}
for k in model_dict:
if k.startswith("encoder."):
encoder_state_dict[k] = model_dict[k]
if k.startswith("decoder."):
decoder_state_dict[k] = model_dict[k]
dec_var_name_list = list(decoder_state_dict.keys())
enc_var_name_list = list(encoder_state_dict.keys())
enc_tensor_names = {}
for name in enc_var_name_list:
name_split = name.split(".")
if len(name_split) <= 2 or not name_split[2].isdigit():
continue
layer_id = int(name_split[2])
enc_tensor_names.setdefault(layer_id, []).append(name)
for layer_id in sorted(enc_tensor_names.keys()):
fill_pb_layer(
enc_tensor_names[layer_id],
encoder_state_dict,
transformer.encoder_stack.add(),
enc_layer_mapping_dict,
)
# fill each decoder layer's params
dec_tensor_names = {}
for name in dec_var_name_list:
name_split = name.split(".")
if len(name_split) <= 2 or not name.split(".")[2].isdigit():
continue
layer_id = int(name.split(".")[2])
dec_tensor_names.setdefault(layer_id, []).append(name)
for layer_id in sorted(dec_tensor_names.keys()):
fill_pb_layer(
dec_tensor_names[layer_id],
decoder_state_dict,
transformer.decoder_stack.add(),
dec_layer_mapping_dict,
)
fill_pb_layer(
enc_var_name_list,
encoder_state_dict,
transformer.src_embedding,
src_emb_mapping_dict,
)
# encoder token embedding
src_tb, _ = gather_token_embedding(
enc_var_name_list, encoder_state_dict, "embed_tokens"
)
transformer.src_embedding.token_embedding[:] = src_tb.flatten().tolist()
# encoder position embedding
pos_emb = None
if "encoder.embed_positions.weight" in encoder_state_dict:
pos_emb = encoder_state_dict["encoder.embed_positions.weight"].numpy()
transformer.src_embedding.position_embedding[:] = pos_emb.flatten().tolist()
else:
pos_emb = get_pos_embedding(max_step + pad_id + 1, src_tb.shape[-1]).numpy()
pos_emb_list = (
pos_emb[pad_id + 1 : max_step + pad_id + 1, :].reshape([-1]).tolist()
)
transformer.src_embedding.position_embedding[:] = pos_emb_list
print(
"encoder.embed_positions.weight -> src_embedding.position_embedding, shape: {}, conversion finished!".format(
pos_emb.shape
)
)
# fill trg_embedding
encode_output_mapping_dict = _get_encode_output_mapping_dict(len(dec_tensor_names))
trg_emb_mapping_dict.update(encode_output_mapping_dict)
fill_pb_layer(
dec_var_name_list,
decoder_state_dict,
transformer.trg_embedding,
trg_emb_mapping_dict,
)
# decoder token embedding
trg_tb, _ = gather_token_embedding(
dec_var_name_list, decoder_state_dict, "embed_tokens"
)
transformer.trg_embedding.token_embedding[:] = trg_tb.transpose().flatten().tolist()
print(
"token_embedding.weight -> trg_embedding.token_embedding, shape: {}, conversion finished!".format(
trg_tb.transpose().shape
)
)
# decoder position embedding
pos_emb = None
if "decoder.embed_positions.weight" in decoder_state_dict:
pos_emb = decoder_state_dict["decoder.embed_positions.weight"].numpy()
transformer.trg_embedding.position_embedding[:] = pos_emb.flatten().tolist()
else:
pos_emb = get_pos_embedding(max_step + pad_id + 1, trg_tb.shape[-1]).numpy()
pos_emb_list = (
pos_emb[pad_id + 1 : max_step + pad_id + 1, :].reshape([-1]).tolist()
)
transformer.trg_embedding.position_embedding[:] = pos_emb_list
print(
"decoder.embed_positions.weight -> trg_embedding.position_embedding, shape: {}, conversion finished!".format(
pos_emb.shape
)
)
# fill in conf
export_ls_config(
transformer,
args.encoder_attention_heads,
pad_id,
bos_id,
eos_id,
args.encoder_layers,
args.decoder_layers,
save_pb=True,
)
save_path = save_model(transformer, pb_path, hdf5_path, hdf5)
return save_path
if __name__ == "__main__":
args = parse_args()
model_name = ".".join(args.model.split(".")[:-1])
pb_path = f"{model_name}.pb"
hdf5_path = f"{model_name}.hdf5"
path = export_native_fs_transformer(args.model, pb_path, hdf5_path, args.hdf5)
src = [[63, 47, 65, 1507, 88, 74, 10, 2057, 362, 9, 284, 6, 2, 1, 1, 1]]
model = lsi.Transformer(path, 8)
output = model.infer(src)
# Expected result: [23, 550, 34, 118, 148, 2939, 4, 42, 32, 37, 6, 224, 10, 179, 5, 2]
print("results:", output)
|
flask_reddit/users/forms.py | huhansan666666/flask_reddit | 461 | 12689303 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Logic handling user specific input forms such as logins and registration.
"""
from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField, BooleanField
from flask_wtf.recaptcha import RecaptchaField
from wtforms.validators import Required, EqualTo, Email
class LoginForm(FlaskForm):
email = TextField('Email address', [Required(), Email()])
password = PasswordField('Password', [Required()])
class RegisterForm(FlaskForm):
username = TextField('NickName', [Required()])
email = TextField('Email address', [Required(), Email()])
password = PasswordField('Password', [Required()])
confirm = PasswordField('Repeat Password', [
Required(),
EqualTo('password', message='Passwords must match')
])
accept_tos = BooleanField('I accept the Terms of Service.', [Required()])
recaptcha = RecaptchaField()
|
pincer/objects/message/attachment.py | Arthurdw/Pincer | 118 | 12689314 | <reponame>Arthurdw/Pincer
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import Optional
from ...utils.types import APINullable
from ...utils.snowflake import Snowflake
@dataclass(repr=False)
class Attachment(APIObject):
"""Represents a Discord Attachment object
Attributes
----------
id: :class:`~pincer.utils.snowflake.Snowflake`
Attachment id
filename: :class:`str`
Name of file attached
content_type: :class:`int`
The attachment's data type
size: :class:`str`
Size of file in bytes
url: :class:`str`
Source url of file
proxy_url: APINullable[:class:`str`]
A proxied url of file
height: APINullable[Optional[:class:`int`]]
Height of file (if image)
width: APINullable[Optional[:class:`int`]]
Width of file (if image)
"""
id: Snowflake
filename: str
size: int
url: str
proxy_url: str
content_type: APINullable[str] = MISSING
height: APINullable[Optional[int]] = MISSING
width: APINullable[Optional[int]] = MISSING
|
tests/test_engine.py | RobertCraigie/prisma-client-py | 518 | 12689322 | <gh_stars>100-1000
import asyncio
import contextlib
from pathlib import Path
from typing import Iterator, Optional
import pytest
from _pytest.monkeypatch import MonkeyPatch
from pytest_subprocess import FakeProcess
from prisma import Client
from prisma.utils import temp_env_update
from prisma.binaries import platform
from prisma.binaries import BINARIES, ENGINE_VERSION
from prisma.engine import errors, utils
from prisma.engine.query import QueryEngine
from prisma._compat import get_running_loop
from .utils import Testdir
QUERY_ENGINE = next( # pragma: no branch
b for b in BINARIES if b.name == 'query-engine'
)
@contextlib.contextmanager
def no_event_loop() -> Iterator[None]:
try:
current: Optional[asyncio.AbstractEventLoop] = get_running_loop()
except RuntimeError:
current = None
try:
asyncio.set_event_loop(None)
yield
finally:
asyncio.set_event_loop(current)
@pytest.mark.asyncio
async def test_engine_connects() -> None:
"""Can connect to engine"""
db = Client()
await db.connect()
with pytest.raises(errors.AlreadyConnectedError):
await db.connect()
await db.disconnect()
def test_stopping_engine_on_closed_loop() -> None:
"""Stopping the engine with no event loop available does not raise an error"""
with no_event_loop():
engine = QueryEngine(dml='')
engine.stop()
def test_engine_binary_does_not_exist(monkeypatch: MonkeyPatch) -> None:
"""No query engine binary found raises an error"""
def mock_exists(path: Path) -> bool:
return False
monkeypatch.setattr(Path, 'exists', mock_exists, raising=True)
with pytest.raises(errors.BinaryNotFoundError) as exc:
utils.ensure()
assert exc.match(
r'Expected .* or .* but neither were found\.\nTry running prisma py fetch'
)
def test_mismatched_version_error(fake_process: FakeProcess) -> None:
"""Mismatched query engine versions raises an error"""
fake_process.register_subprocess(
[QUERY_ENGINE.path, '--version'], # type: ignore[list-item]
stdout='query-engine unexpected-hash',
)
with pytest.raises(errors.MismatchedVersionsError) as exc:
utils.ensure()
assert exc.match(
f'Expected query engine version `{ENGINE_VERSION}` but got `unexpected-hash`'
)
def test_ensure_local_path(testdir: Testdir, fake_process: FakeProcess) -> None:
"""Query engine in current directory required to be the expected version"""
fake_engine = testdir.path / platform.check_for_extension(
f'prisma-query-engine-{platform.binary_platform()}'
)
fake_engine.touch()
fake_process.register_subprocess(
[fake_engine, '--version'], # type: ignore[list-item]
stdout='query-engine a-different-hash',
)
with pytest.raises(errors.MismatchedVersionsError):
path = utils.ensure()
fake_process.register_subprocess(
[fake_engine, '--version'], # type: ignore[list-item]
stdout=f'query-engine {ENGINE_VERSION}',
)
path = utils.ensure()
assert path == fake_engine
def test_ensure_env_override(testdir: Testdir, fake_process: FakeProcess) -> None:
"""Query engine path in environment variable can be any version"""
fake_engine = testdir.path / 'my-query-engine'
fake_engine.touch()
fake_process.register_subprocess(
[fake_engine, '--version'], # type: ignore[list-item]
stdout='query-engine a-different-hash',
)
with temp_env_update({'PRISMA_QUERY_ENGINE_BINARY': str(fake_engine)}):
path = utils.ensure()
assert path == fake_engine
def test_ensure_env_override_does_not_exist() -> None:
"""Query engine path in environment variable not found raises an error"""
with temp_env_update({'PRISMA_QUERY_ENGINE_BINARY': 'foo'}):
with pytest.raises(errors.BinaryNotFoundError) as exc:
utils.ensure()
assert exc.match(
r'PRISMA_QUERY_ENGINE_BINARY was provided, but no query engine was found at foo'
)
|
Giveme5W1H/examples/evaluation/evaluation.py | bkrrr/Giveme5W | 410 | 12689333 | import csv
import os
"""
Evaluates results by calculating:
- ICR (pairwise intercoder reliability: AB, BC, AC)
- GP (generalized precision)
Results are reported globally and per category.
"""
filename = 'evaluation_data_how.csv'
# change csv column index, if necessary here
category_index = 2
coder_a_index = 5
coder_b_index = 6
coder_c_index = 7
# measure_agreement function to keep code more readable
def measure_agreement(a, b):
if a == b:
return 1
else:
return 0
# convert an ICR rating on the 0-2 scale to the GP scale from 0 to 1
# (written in an easy-to-read way on purpose, i.e. without normalization)
def to_precision_generalized(a):
if a == 0:
# not relevant:
return 0
elif a == 1:
# partial relevant
return 0.5
else:
# relevant
return 1
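# Worked example (illustrative addition, not part of the original script):
# three coders rating one document 2, 1, 2 give pairwise agreements
# AB=0, AC=1, CB=0, i.e. a line ICR of 1/3, and contribute
# 1 + 0.5 + 1 = 2.5 (out of a possible 3) towards generalized precision.
assert measure_agreement(2, 1) == 0 and measure_agreement(2, 2) == 1
assert to_precision_generalized(1) == 0.5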
with open(os.path.dirname(__file__) + '/' + filename, 'r') as csvfile:
reader = csv.reader(csvfile)
is_header = True
ICR = 0
ICR_cat = {}
generalized_precision = 0
generalized_precision_cat = {}
    agreement = []
for line in reader:
if is_header:
is_header = False
else:
category = line[category_index]
coder_a = int(line[coder_a_index])
coder_b = int(line[coder_b_index])
coder_c = int(line[coder_c_index])
# measure pairwise agreement AB, AC, CB
ab = measure_agreement(coder_a, coder_b)
ac = measure_agreement(coder_a, coder_c)
cb = measure_agreement(coder_c, coder_b)
# measure agreement of the pairs
# inter-rater reliability is based on agreement between pairs of raters.
line_agreement = (ab + ac + cb) / 3
# irc global
ICR = ICR + line_agreement
# irc per category
ICR_cat[category] = ICR_cat.get(category, 0) + line_agreement
# gp global
tmp_gp = to_precision_generalized(coder_a) + to_precision_generalized(coder_b) + to_precision_generalized(
coder_c)
generalized_precision = generalized_precision + tmp_gp
# gp per category
generalized_precision_cat[category] = generalized_precision_cat.get(category, 0) + tmp_gp
# saved, for possible output
            agreement.append((category, ab, ac, cb, line_agreement, tmp_gp))
    line_count = len(agreement)
cat_count = len(ICR_cat)
line_count_cat = line_count / cat_count
# for GP: summarize all ratings dividing by the number of all ratings
rating_count = line_count * 3 # each doc was rated by 3 coder
rating_count_cat = rating_count / cat_count
# output
print('Global ICR: ' + str(ICR / line_count))
print('Global GP: ' + str(generalized_precision / rating_count))
for cat in ICR_cat:
val = ICR_cat[cat]
print(cat + ' ICR: ' + str(val / line_count_cat))
val = generalized_precision_cat[cat]
print(cat + ' GP: ' + str(val / rating_count_cat))
|
doc/examples/scripts/structure/normal_modes.py | alex123012/biotite | 208 | 12689340 | r"""
Visualization of normal modes from an elastic network model
===========================================================
The *elastic network model* (ENM) is a fast method to estimate movements
in a protein structure, without the need to run time-consuming MD
simulations.
A protein is modelled as a *mass-and-spring* model, with the masses being
the :math:`C_\alpha` atoms and the springs being the non-covalent bonds
between adjacent residues.
Via *normal mode analysis* distinct movements/oscillations can be
extracted from the model.
An *anisotropic network model* (ANM) is an ENM that includes
directional information.
Hence, the normal mode analysis yields eigenvectors, where each atom is
represented by three vector components (*x*, *y*, *z*).
Thus these vectors can be used for 3D representation.
In the case of this example a normal mode analysis on an ANM was already
conducted.
This script merely takes the structure and obtained eigenvectors
to add a smooth oscillation of the chosen normal mode to the structure.
The newly created structure has multiple models, where each model
depicts a different time in the oscillation period.
Then the multi-model structure can be used to create a video of the
oscillation using a molecular visualization program.
The file containing the eigenvectors can be downloaded via this
:download:`link </examples/download/glycosylase_anm_vectors.csv>`.
"""
# Code source: <NAME>
# License: BSD 3 clause
from tempfile import NamedTemporaryFile
from os.path import join
import numpy as np
from numpy import newaxis
import biotite.structure as struc
import biotite.structure.io as strucio
import biotite.structure.io.mmtf as mmtf
import biotite.database.rcsb as rcsb
# A CSV file containing the eigenvectors for the CA atoms
VECTOR_FILE = "../../download/glycosylase_anm_vectors.csv"
# The corresponding structure
PDB_ID = "1MUG"
# The normal mode to be visualized
# '-1' is the last (and most significant) one
MODE = -1
# The amount of frames (models) per oscillation
FRAMES = 60
# The maximum oscillation amplitude for an atom
# (The length of the ANM's eigenvectors make only sense when compared
# relative to each other, the absolute values have no significance)
MAX_AMPLITUDE = 5
# Load structure
mmtf_file = mmtf.MMTFFile.read(rcsb.fetch(PDB_ID, "mmtf"))
structure = mmtf.get_structure(mmtf_file, model=1)
# Filter first peptide chain
protein_chain = structure[
struc.filter_amino_acids(structure)
& (structure.chain_id == structure.chain_id[0])
]
# Filter CA atoms
ca = protein_chain[protein_chain.atom_name == "CA"]
# Load eigenvectors for CA atoms
# The first axis indicates the mode,
# the second axis indicates the vector component
vectors = np.loadtxt(VECTOR_FILE, delimiter=",").transpose()
# Discard the last 6 modes, as these are movements of the entire system:
# A system with N atoms has only 3N - 6 degrees of freedom
# ^^^
vectors = vectors[:-6]
# Extract vectors for given mode and reshape to (n,3) array
mode_vectors = vectors[MODE].reshape((-1, 3))
# Rescale, so that the largest vector has the length 'MAX_AMPLITUDE'
vector_lengths = np.sqrt(np.sum(mode_vectors**2, axis=-1))
scale = MAX_AMPLITUDE / np.max(vector_lengths)
mode_vectors *= scale
# Stepwise application of eigenvectors as smooth sine oscillation
time = np.linspace(0, 2*np.pi, FRAMES, endpoint=False)
deviation = np.sin(time)[:, newaxis, newaxis] * mode_vectors
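# Shape note (illustrative addition, not part of the original example): with
# F = FRAMES and M CA atoms, np.sin(time)[:, newaxis, newaxis] has shape
# (F, 1, 1) and mode_vectors has shape (M, 3), so broadcasting yields a
# deviation array of shape (F, M, 3) -- one displacement per frame and atom.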
# Apply oscillation of CA atom to all atoms in the corresponding residue
oscillation = np.zeros((FRAMES, len(protein_chain), 3))
residue_starts = struc.get_residue_starts(
protein_chain,
# The last array element will be the length of the atom array,
# i.e. no valid index
add_exclusive_stop=True
)
for i in range(len(residue_starts) -1):
res_start = residue_starts[i]
res_stop = residue_starts[i+1]
oscillation[:, res_start:res_stop, :] \
= protein_chain.coord[res_start:res_stop, :] + deviation[:, i:i+1, :]
# An atom array stack containing all frames
oscillating_structure = struc.from_template(protein_chain, oscillation)
# Save as PDB for rendering a video with PyMOL
temp = NamedTemporaryFile(suffix=".pdb")
strucio.save_structure(temp.name, oscillating_structure)
# biotite_static_image = normal_modes.gif
temp.close() |
examples/04_manipulating_images/plot_negate_image.py | SIMEXP/nilearn | 827 | 12689343 |
"""
Negating an image with math_img
===============================
The goal of this example is to illustrate the use of the function
:func:`nilearn.image.math_img` on T-maps.
We compute a negative image by multiplying its voxel values with -1.
"""
from nilearn import datasets, plotting, image
###############################################################################
# Retrieve the data: the localizer dataset with contrast maps.
motor_images = datasets.fetch_neurovault_motor_task()
stat_img = motor_images.images[0]
###############################################################################
# Multiply voxel values by -1.
negative_stat_img = image.math_img("-img", img=stat_img)
plotting.plot_stat_map(stat_img,
cut_coords=(36, -27, 66),
threshold=3, title="t-map", vmax=9
)
plotting.plot_stat_map(negative_stat_img,
cut_coords=(36, -27, 66),
threshold=3, title="Negative t-map", vmax=9
)
plotting.show()
|
sdk/python/pulumi_azure/authorization/get_user_assigned_identity.py | henriktao/pulumi-azure | 109 | 12689358 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetUserAssignedIdentityResult',
'AwaitableGetUserAssignedIdentityResult',
'get_user_assigned_identity',
'get_user_assigned_identity_output',
]
@pulumi.output_type
class GetUserAssignedIdentityResult:
"""
A collection of values returned by getUserAssignedIdentity.
"""
def __init__(__self__, client_id=None, id=None, location=None, name=None, principal_id=None, resource_group_name=None, tags=None, tenant_id=None):
if client_id and not isinstance(client_id, str):
raise TypeError("Expected argument 'client_id' to be a str")
pulumi.set(__self__, "client_id", client_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if principal_id and not isinstance(principal_id, str):
raise TypeError("Expected argument 'principal_id' to be a str")
pulumi.set(__self__, "principal_id", principal_id)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
"""
The Client ID of the User Assigned Identity.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure location where the User Assigned Identity exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The Service Principal ID of the User Assigned Identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the User Assigned Identity.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The Tenant ID of the User Assigned Identity.
"""
return pulumi.get(self, "tenant_id")
class AwaitableGetUserAssignedIdentityResult(GetUserAssignedIdentityResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUserAssignedIdentityResult(
client_id=self.client_id,
id=self.id,
location=self.location,
name=self.name,
principal_id=self.principal_id,
resource_group_name=self.resource_group_name,
tags=self.tags,
tenant_id=self.tenant_id)
def get_user_assigned_identity(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUserAssignedIdentityResult:
"""
Use this data source to access information about an existing User Assigned Identity.
## Example Usage
    ### Reference An Existing Identity
```python
import pulumi
import pulumi_azure as azure
example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity",
resource_group_name="name_of_resource_group")
pulumi.export("uaiClientId", example.client_id)
pulumi.export("uaiPrincipalId", example.principal_id)
pulumi.export("uaiTenantId", example.tenant_id)
```
:param str name: The name of the User Assigned Identity.
:param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:authorization/getUserAssignedIdentity:getUserAssignedIdentity', __args__, opts=opts, typ=GetUserAssignedIdentityResult).value
return AwaitableGetUserAssignedIdentityResult(
client_id=__ret__.client_id,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
principal_id=__ret__.principal_id,
resource_group_name=__ret__.resource_group_name,
tags=__ret__.tags,
tenant_id=__ret__.tenant_id)
@_utilities.lift_output_func(get_user_assigned_identity)
def get_user_assigned_identity_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetUserAssignedIdentityResult]:
"""
Use this data source to access information about an existing User Assigned Identity.
## Example Usage
    ### Reference An Existing Identity
```python
import pulumi
import pulumi_azure as azure
example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity",
resource_group_name="name_of_resource_group")
pulumi.export("uaiClientId", example.client_id)
pulumi.export("uaiPrincipalId", example.principal_id)
pulumi.export("uaiTenantId", example.tenant_id)
```
:param str name: The name of the User Assigned Identity.
:param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
"""
...
|
kgp/datasets/sysid.py | alshedivat/keras-gp | 181 | 12689369 | """
Interface for system identification data (Actuator and Drives).
"""
from __future__ import print_function
import os
import sys
import zipfile
import warnings
import numpy as np
import scipy.io as sio
from six.moves import urllib
from six.moves import cPickle as pkl
SOURCE_URLS = {
'actuator': 'https://www.cs.cmu.edu/~mshediva/data/actuator.mat',
'drives': 'https://www.cs.cmu.edu/~mshediva/data/NonlinearData.zip',
}
def maybe_download(data_path, dataset_name, verbose=1):
source_url = SOURCE_URLS[dataset_name]
datadir_path = os.path.join(data_path, 'sysid')
dataset_path = os.path.join(datadir_path, dataset_name + '.mat')
# Create directories (if necessary)
if not os.path.isdir(datadir_path):
os.makedirs(datadir_path)
# Download & extract the data (if necessary)
if not os.path.isfile(dataset_path):
if dataset_name == 'actuator':
urllib.request.urlretrieve(source_url, dataset_path)
if dataset_name == 'drives':
assert source_url.endswith('.zip')
archive_path = os.path.join(datadir_path, 'tmp.zip')
urllib.request.urlretrieve(source_url, archive_path)
with zipfile.ZipFile(archive_path, 'r') as zfp:
zfp.extract('DATAPRBS.MAT', datadir_path)
os.rename(os.path.join(datadir_path, 'DATAPRBS.MAT'), dataset_path)
os.remove(archive_path)
if verbose:
print("Successfully downloaded `%s` dataset from %s." %
(dataset_name, source_url))
return dataset_path
def load_data(dataset_name, t_step=1, start=0., stop=100.,
use_targets=True, batch_size=None, verbose=1):
"""Load the system identification data.
Arguments:
    ----------
    dataset_name : str
        Name of the dataset to load: either 'actuator' or 'drives'.
t_step : uint (default: 1)
Take data points t_step apart from each other in time.
start : float in [0., 100.) (default: 0.)
stop : float in (0., 100.] (default: 100.)
use_targets : bool (default: True)
batch_size : uint or None (default: None)
verbose : uint (default: 1)
"""
if dataset_name not in {'actuator', 'drives'}:
raise ValueError("Unknown dataset: %s" % dataset_name)
if 'DATA_PATH' not in os.environ:
warnings.warn("Cannot find DATA_PATH variable in the environment. "
"Using <current_working_directory>/data/ instead.")
DATA_PATH = os.path.join(os.getcwd(), 'data')
else:
DATA_PATH = os.environ['DATA_PATH']
dataset_path = maybe_download(DATA_PATH, dataset_name, verbose=verbose)
if not os.path.exists(dataset_path):
raise Exception("Cannot find data: %s" % dataset_path)
if verbose:
sys.stdout.write('Loading data...')
sys.stdout.flush()
data_mat = sio.loadmat(dataset_path)
if dataset_name == 'actuator':
X, Y = data_mat['u'], data_mat['p']
if dataset_name == 'drives':
X, Y = data_mat['u1'], data_mat['z1']
start = int((start/100.) * len(X))
stop = int((stop/100.) * len(X))
X = X[start:stop:t_step,:]
Y = Y[start:stop:t_step,:]
if use_targets:
X = np.hstack([X, Y])
if batch_size:
nb_examples = (len(X) // batch_size) * batch_size
X = X[:nb_examples]
Y = Y[:nb_examples]
if verbose:
sys.stdout.write('Done.\n')
print('# of loaded points: %d' % len(X))
return X, Y
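# Illustrative usage sketch (not part of the original module). The first call
# downloads `actuator.mat` into $DATA_PATH/sysid/ (or ./data/sysid/ when
# DATA_PATH is unset), so network access is assumed:
#
#     X, Y = load_data('actuator', t_step=2, start=0., stop=50.,
#                      use_targets=True, batch_size=32)
#     print(X.shape, Y.shape)  # inputs include targets when use_targets=True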
|
package_syncing/logger.py | csch0/SublimeText-Package-Syncing | 171 | 12689389 |
import sublime
import sublime_plugin
import logging
LOG = False
TRACE = 9
BASIC_FORMAT = "[%(asctime)s - %(levelname)s - %(filename)s %(funcName)s] %(message)s"
logging.addLevelName("TRACE", TRACE)
class CustomLogger(logging.Logger):
def isEnabledFor(self, level):
if not LOG:
return
return level >= self.getEffectiveLevel()
def trace(self, msg="", *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
def getLogger(name, level=logging.DEBUG):
log = CustomLogger(name, level)
# Set stream handler
h = logging.StreamHandler()
h.setFormatter(logging.Formatter(BASIC_FORMAT))
log.addHandler(h)
return log
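# Illustrative usage sketch (not part of the original plugin code):
#
#     log = getLogger(__name__, level=TRACE)
#     log.debug('regular debug message')
#     log.trace('emitted only when the module-level LOG flag is True')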
|
aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/ListTransitRouterRouteTablesRequest.py | leafcoder/aliyun-openapi-python-sdk | 1,001 | 12689394 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class ListTransitRouterRouteTablesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'ListTransitRouterRouteTables','cbn')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TransitRouterRouteTableNamess(self):
return self.get_query_params().get('TransitRouterRouteTableNames')
def set_TransitRouterRouteTableNamess(self, TransitRouterRouteTableNamess):
for depth1 in range(len(TransitRouterRouteTableNamess)):
if TransitRouterRouteTableNamess[depth1] is not None:
self.add_query_param('TransitRouterRouteTableNames.' + str(depth1 + 1) , TransitRouterRouteTableNamess[depth1])
def get_TransitRouterRouteTableType(self):
return self.get_query_params().get('TransitRouterRouteTableType')
def set_TransitRouterRouteTableType(self,TransitRouterRouteTableType):
self.add_query_param('TransitRouterRouteTableType',TransitRouterRouteTableType)
def get_TransitRouterRouteTableStatus(self):
return self.get_query_params().get('TransitRouterRouteTableStatus')
def set_TransitRouterRouteTableStatus(self,TransitRouterRouteTableStatus):
self.add_query_param('TransitRouterRouteTableStatus',TransitRouterRouteTableStatus)
def get_TransitRouterRouteTableIdss(self):
return self.get_query_params().get('TransitRouterRouteTableIds')
def set_TransitRouterRouteTableIdss(self, TransitRouterRouteTableIdss):
for depth1 in range(len(TransitRouterRouteTableIdss)):
if TransitRouterRouteTableIdss[depth1] is not None:
self.add_query_param('TransitRouterRouteTableIds.' + str(depth1 + 1) , TransitRouterRouteTableIdss[depth1])
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_TransitRouterId(self):
return self.get_query_params().get('TransitRouterId')
def set_TransitRouterId(self,TransitRouterId):
self.add_query_param('TransitRouterId',TransitRouterId)
def get_MaxResults(self):
return self.get_query_params().get('MaxResults')
def set_MaxResults(self,MaxResults):
self.add_query_param('MaxResults',MaxResults) |
ml/kubeflow-pipelines/keras_tuner/components/kubeflow-resources/bikesw_training/eval_metrics.py | amygdala/code-snippets | 146 | 12689397 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
# from kfp.components import InputPath, OutputPath
# An example of how the model eval info could be used to make decisions about whether or not
# to deploy the model.
def eval_metrics(
metrics: str,
thresholds: str
) -> NamedTuple('Outputs', [('deploy', str)]):
import json
import logging
def regression_threshold_check(metrics_info):
# ...
for k, v in thresholds_dict.items():
logging.info('k {}, v {}'.format(k, v))
if k in ['root_mean_squared_error', 'mae']:
if metrics_info[k][-1] > v:
logging.info('{} > {}; returning False'.format(metrics_info[k][0], v))
return ('False', )
return ('deploy', )
logging.getLogger().setLevel(logging.INFO) # TODO: make level configurable
thresholds_dict = json.loads(thresholds)
logging.info('thresholds dict: {}'.format(thresholds_dict))
logging.info('metrics: %s', metrics)
metrics_dict = json.loads(metrics)
logging.info("got metrics info: %s", metrics_dict)
res = regression_threshold_check(metrics_dict)
logging.info('deploy decision: %s', res)
return res
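# Illustrative invocation sketch (not part of the original component). The
# metric names below are assumptions -- they only need to match keys emitted
# by the training step; values are per-epoch lists, and the last entry is
# compared against the threshold:
#
#     eval_metrics(
#         metrics='{"mae": [3.2, 2.7], "root_mean_squared_error": [4.9, 4.1]}',
#         thresholds='{"mae": 3.0, "root_mean_squared_error": 5.0}',
#     )
#     # -> ('deploy',) since the latest mae and rmse are below their thresholds;
#     #    otherwise ('False',) is returned.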
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(eval_metrics,
output_component_file='../../eval_metrics_component.yaml', base_image='gcr.io/deeplearning-platform-release/tf2-cpu.2-3:latest')
|
zerver/webhooks/json/tests.py | Pulkit007/zulip | 17,004 | 12689418 |
import json
from zerver.lib.test_classes import WebhookTestCase
class JsonHookTests(WebhookTestCase):
STREAM_NAME = "json"
URL_TEMPLATE = "/api/v1/external/json?api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "json"
def test_json_github_push__1_commit_message(self) -> None:
"""
        Tests that a JSON GitHub push with 1 commit is handled correctly
"""
with open("zerver/webhooks/json/fixtures/json_github_push__1_commit.json") as f:
original_fixture = json.load(f)
expected_topic = "JSON"
expected_message = """```json
{original_fixture}
```""".format(
original_fixture=json.dumps(original_fixture, indent=2)
)
self.check_webhook("json_github_push__1_commit", expected_topic, expected_message)
def test_json_pingdom_http_up_to_down_message(self) -> None:
"""
        Tests that a JSON Pingdom HTTP up-to-down event is handled correctly
"""
with open("zerver/webhooks/json/fixtures/json_pingdom_http_up_to_down.json") as f:
original_fixture = json.load(f)
expected_topic = "JSON"
expected_message = """```json
{original_fixture}
```""".format(
original_fixture=json.dumps(original_fixture, indent=2)
)
self.check_webhook("json_pingdom_http_up_to_down", expected_topic, expected_message)
def test_json_sentry_event_for_exception_js_message(self) -> None:
"""
        Tests that a JSON Sentry event for a JS exception is handled correctly
"""
with open("zerver/webhooks/json/fixtures/json_sentry_event_for_exception_js.json") as f:
original_fixture = json.load(f)
expected_topic = "JSON"
expected_message = """```json
{original_fixture}
```""".format(
original_fixture=json.dumps(original_fixture, indent=2)
)
self.check_webhook("json_sentry_event_for_exception_js", expected_topic, expected_message)
|
glucometerutils/support/tests/test_construct_extras.py | Flameeyes/glucometerutils | 135 | 12689429 | # -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: © 2018 The glucometerutils Authors
# SPDX-License-Identifier: MIT
"""Tests for the common routines."""
# pylint: disable=protected-access,missing-docstring
import datetime
import construct
from absl.testing import absltest
from glucometerutils.support import construct_extras
_TEST_DATE1 = datetime.datetime(1970, 1, 2, 0, 0)
_TEST_DATE2 = datetime.datetime(1971, 1, 1, 0, 0)
_TEST_DATE3 = datetime.datetime(1970, 1, 1, 0, 0)
_NEW_EPOCH = 31536000 # datetime.datetime(1971, 1, 1, 0, 0)
class TestTimestamp(absltest.TestCase):
def test_build_unix_epoch(self):
self.assertEqual(
construct_extras.Timestamp(construct.Int32ul).build(_TEST_DATE1),
b"\x80\x51\x01\x00",
)
def test_parse_unix_epoch(self):
self.assertEqual(
construct_extras.Timestamp(construct.Int32ul).parse(b"\x803\xe1\x01"),
_TEST_DATE2,
)
def test_build_custom_epoch(self):
self.assertEqual(
construct_extras.Timestamp(construct.Int32ul, epoch=_NEW_EPOCH).build(
_TEST_DATE2
),
b"\x00\x00\x00\x00",
)
def test_parse_custom_epoch(self):
self.assertEqual(
construct_extras.Timestamp(construct.Int32ul, epoch=_NEW_EPOCH).parse(
b"\x00\x00\x00\x00"
),
_TEST_DATE2,
)
def test_build_custom_epoch_negative_failure(self):
with self.assertRaises(construct.core.FormatFieldError):
construct_extras.Timestamp(construct.Int32ul, epoch=_NEW_EPOCH).build(
_TEST_DATE1
)
def test_build_custom_epoch_negative_success(self):
self.assertEqual(
construct_extras.Timestamp(construct.Int32sl, epoch=_NEW_EPOCH).build(
_TEST_DATE1
),
b"\x00\x1e\x20\xfe",
)
def test_build_varint(self):
self.assertEqual(
construct_extras.Timestamp(construct.VarInt).build(_TEST_DATE3), b"\x00"
)
def test_invalid_value(self):
with self.assertRaises(AssertionError):
construct_extras.Timestamp(construct.Int32ul).build("foo")
|
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep-py3/test_pathlib.py | timoles/codeql | 4,036 | 12689458 | # Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import Iterable, TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
import pathlib
# pathlib was added in 3.4
def test_basic():
print("\n# test_basic")
ts = TAINTED_STRING
tainted_path = pathlib.Path(ts)
tainted_pure_path = pathlib.PurePath(ts)
tainted_pure_posix_path = pathlib.PurePosixPath(ts)
tainted_pure_windows_path = pathlib.PureWindowsPath(ts)
ensure_tainted(
tainted_path, # $ tainted
tainted_pure_path, # $ tainted
tainted_pure_posix_path, # $ tainted
tainted_pure_windows_path, # $ tainted
pathlib.Path("foo") / ts, # $ tainted
ts / pathlib.Path("foo"), # $ tainted
tainted_path.joinpath("foo", "bar"), # $ tainted
pathlib.Path("foo").joinpath(tainted_path, "bar"), # $ tainted
pathlib.Path("foo").joinpath("bar", tainted_path), # $ tainted
str(tainted_path), # $ tainted
# TODO: Tainted methods and attributes
# https://docs.python.org/3.8/library/pathlib.html#methods-and-properties
)
if os.name == "posix":
tainted_posix_path = pathlib.PosixPath(ts)
ensure_tainted(
tainted_posix_path, # $ tainted
)
if os.name == "nt":
tainted_windows_path = pathlib.WindowsPath(ts)
ensure_tainted(
tainted_windows_path, # $ tainted
)
# Make tests runable
test_basic()
|
t/test_umash_fprint.py | backtrace-labs/umash | 108 | 12689472 | """
Test suite for the public fingerprinting function.
"""
from hypothesis import given, settings
import hypothesis.strategies as st
from umash import C, FFI
from umash_reference import umash, UmashKey
U64S = st.integers(min_value=0, max_value=2 ** 64 - 1)
FIELD = 2 ** 61 - 1
def repeats(min_size):
"""Repeats one byte n times."""
return st.builds(
lambda count, binary: binary * count,
st.integers(min_value=min_size, max_value=1024),
st.binary(min_size=1, max_size=1),
)
@given(
seed=U64S,
multipliers=st.lists(
st.integers(min_value=0, max_value=FIELD - 1), min_size=2, max_size=2
),
key=st.lists(
U64S,
min_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
max_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
),
data=st.binary() | repeats(1),
)
def test_public_umash_fprint(seed, multipliers, key, data):
"""Compare umash_fprint with two calls to the reference."""
expected = [
umash(UmashKey(poly=multipliers[0], oh=key), seed, data, secondary=False),
umash(UmashKey(poly=multipliers[1], oh=key), seed, data, secondary=True),
]
n_bytes = len(data)
block = FFI.new("char[]", n_bytes)
FFI.memmove(block, data, n_bytes)
params = FFI.new("struct umash_params[1]")
for i, multiplier in enumerate(multipliers):
params[0].poly[i][0] = (multiplier ** 2) % FIELD
params[0].poly[i][1] = multiplier
for i, param in enumerate(key):
params[0].oh[i] = param
actual = C.umash_fprint(params, seed, block, n_bytes)
assert [actual.hash[0], actual.hash[1]] == expected
@settings(deadline=None)
@given(
seed=U64S,
multipliers=st.lists(
st.integers(min_value=0, max_value=FIELD - 1), min_size=2, max_size=2
),
key=st.lists(
U64S,
min_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
max_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
),
byte=st.binary(min_size=1, max_size=1),
)
def test_public_umash_fprint_repeated(seed, multipliers, key, byte):
"""Compare umash_fprint with two calls to the reference, for n
repetitions of the input byte."""
params = FFI.new("struct umash_params[1]")
for i, multiplier in enumerate(multipliers):
params[0].poly[i][0] = (multiplier ** 2) % FIELD
params[0].poly[i][1] = multiplier
for i, param in enumerate(key):
params[0].oh[i] = param
for i in range(520):
data = byte * i
expected = [
umash(UmashKey(poly=multipliers[0], oh=key), seed, data, secondary=False),
umash(UmashKey(poly=multipliers[1], oh=key), seed, data, secondary=True),
]
n_bytes = len(data)
block = FFI.new("char[]", n_bytes)
FFI.memmove(block, data, n_bytes)
actual = C.umash_fprint(params, seed, block, n_bytes)
assert [actual.hash[0], actual.hash[1]] == expected
|
rgb_track/models/wrappers/rgb_simple_wrapper.py | hieuvecto/CASIA-SURF_CeFA | 133 | 12689506 | import torch.nn as nn
import torch
from at_learner_core.models.wrappers.losses import get_loss
from at_learner_core.models.wrappers.simple_classifier_wrapper import SimpleClassifierWrapper
from at_learner_core.models.architectures import get_backbone
class RGBSimpleWrapper(SimpleClassifierWrapper):
def __init__(self, wrapper_config):
super().__init__(wrapper_config)
def _init_modules(self, wrapper_config):
self.backbone, feature_size = get_backbone(wrapper_config.backbone,
pretrained=wrapper_config.pretrained,
get_feature_size=True)
self.classifier = nn.Linear(feature_size, wrapper_config.nclasses)
def predict(self, x):
features = self.backbone(x['data'])
output = self.classifier(features)
if isinstance(self.loss, (nn.BCELoss, nn.BCEWithLogitsLoss)):
output = torch.sigmoid(output)
elif isinstance(self.loss, nn.CrossEntropyLoss):
output = nn.functional.softmax(output, dim=0)[:, 1]
output_dict = {'output': output.detach().cpu()}
return output_dict
class RGBSimpleInferenceWrapper(RGBSimpleWrapper):
def forward(self, x):
features = self.backbone(x)
output = self.classifier(features)
output = torch.sigmoid(output)
return output
|
Chapter10/c10_16_straddle.py | John-ye666/Python-for-Finance-Second-Edition | 236 | 12689512 | # -*- coding: utf-8 -*-
"""
Name : c10_16_straddle.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import matplotlib.pyplot as plt
import numpy as np
sT = np.arange(30,80,5)
x=50; c=2; p=1
straddle=(abs(sT-x)+sT-x)/2-c + (abs(x-sT)+x-sT)/2-p
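# The identity (|a| + a)/2 == max(a, 0) turns the piecewise call/put payoffs
# above into vectorised expressions; quick sanity check (illustrative addition,
# not in the original book script):
assert np.allclose((abs(sT - x) + sT - x) / 2, np.maximum(sT - x, 0))
assert np.allclose((abs(x - sT) + x - sT) / 2, np.maximum(x - sT, 0))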
y0=np.zeros(len(sT))
plt.ylim(-6,20)
plt.xlim(40,70)
plt.plot(sT,y0)
plt.plot(sT,straddle,'r')
plt.plot([x,x],[-6,4],'g-.')
plt.title("Profit-loss for a Straddle")
plt.xlabel('Stock price')
plt.ylabel('Profit (loss)')
plt.annotate('Point 1='+str(x-c-p), xy=(x-p-c,0), xytext=(x-p-c,10),
arrowprops=dict(facecolor='red',shrink=0.01),)
plt.annotate('Point 2='+str(x+c+p), xy=(x+p+c,0), xytext=(x+p+c,13),
arrowprops=dict(facecolor='blue',shrink=0.01),)
plt.annotate('exercise price', xy=(x+1,-5))
plt.annotate('Buy a call and buy a put with the same exercise price',xy=(45,16))
plt.show()
|
stable_nalu/layer/_abstract_recurrent_cell.py | wlm2019/Neural-Arithmetic-Units | 147 | 12689513 |
import torch
from ..abstract import ExtendedTorchModule
class AbstractRecurrentCell(ExtendedTorchModule):
def __init__(self, Op, input_size, hidden_size, writer=None, **kwargs):
super().__init__('recurrent', writer=writer, **kwargs)
self.input_size = input_size
self.hidden_size = hidden_size
self.op = Op(input_size + hidden_size, hidden_size, writer=self.writer, **kwargs)
def reset_parameters(self):
self.op.reset_parameters()
def forward(self, x_t, h_tm1):
return self.op(torch.cat((x_t, h_tm1), dim=1))
def extra_repr(self):
return 'input_size={}, hidden_size={}'.format(
self.input_size, self.hidden_size
)
|
tests/test_visitors/test_ast/test_complexity/test_jones/test_module_complexity.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12689539 |
import pytest
from wemake_python_styleguide.visitors.ast.complexity.jones import (
JonesComplexityVisitor,
JonesScoreViolation,
)
module_without_nodes = ''
module_with_nodes = """
some_value = 1 + 2
other = some_value if some_value > 2 else some_value * 8 + 34
"""
module_with_function = """
def some_function(param):
return param + param * 2
some_function(12 + 6)
"""
module_with_class = """
class SomeClass(object):
def execute(self):
return self
some = SomeClass()
print(some.execute())
"""
@pytest.mark.parametrize('code', [
module_without_nodes,
module_with_nodes,
module_with_function,
module_with_class,
])
def test_module_score(
assert_errors,
parse_ast_tree,
code,
default_options,
mode,
):
"""Testing that regular nodes do not raise violations."""
tree = parse_ast_tree(mode(code))
visitor = JonesComplexityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize(('code', 'score'), [
(module_without_nodes, 0),
(module_with_nodes, 8.5),
(module_with_function, 6),
(module_with_class, 2),
])
def test_module_score_error(
assert_errors,
assert_error_text,
parse_ast_tree,
code,
score,
options,
mode,
):
"""Testing that regular nodes do raise violations."""
tree = parse_ast_tree(mode(code))
option_values = options(max_jones_score=-1)
visitor = JonesComplexityVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [JonesScoreViolation])
assert_error_text(visitor, str(score), option_values.max_jones_score)
|
netaddr/tests/ip/test_ip.py | Rockly/netaddr | 416 | 12689557 | import weakref
from netaddr import IPAddress, IPNetwork, IPRange
def test_ip_classes_are_weak_referencable():
weakref.ref(IPAddress('10.0.0.1'))
weakref.ref(IPNetwork('10.0.0.1/8'))
weakref.ref(IPRange('10.0.0.1', '10.0.0.10'))
|
actions/lib/utils.py | mattmiller87/stackstorm-time | 164 | 12689584 |
import time
__all__ = [
'dt_to_timestamp'
]
def dt_to_timestamp(dt):
timestamp = int(time.mktime(dt.timetuple()))
return timestamp
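# Illustrative usage sketch (not part of the original action library). Note
# that time.mktime() interprets the datetime in the local timezone:
#
#     import datetime
#     ts = dt_to_timestamp(datetime.datetime(2020, 1, 1, 12, 0))
#     # -> seconds since the Unix epoch, e.g. 1577880000 on a UTC host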
|
superset/dashboards/filter_sets/commands/create.py | delorenzosoftware/superset | 18,621 | 12689597 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Dict
from flask import g
from flask_appbuilder.models.sqla import Model
from flask_appbuilder.security.sqla.models import User
from superset import security_manager
from superset.dashboards.filter_sets.commands.base import BaseFilterSetCommand
from superset.dashboards.filter_sets.commands.exceptions import (
DashboardIdInconsistencyError,
FilterSetCreateFailedError,
UserIsNotDashboardOwnerError,
)
from superset.dashboards.filter_sets.consts import (
DASHBOARD_ID_FIELD,
DASHBOARD_OWNER_TYPE,
OWNER_ID_FIELD,
OWNER_TYPE_FIELD,
)
from superset.dashboards.filter_sets.dao import FilterSetDAO
logger = logging.getLogger(__name__)
class CreateFilterSetCommand(BaseFilterSetCommand):
# pylint: disable=C0103
def __init__(self, user: User, dashboard_id: int, data: Dict[str, Any]):
super().__init__(user, dashboard_id)
self._properties = data.copy()
def run(self) -> Model:
self.validate()
self._properties[DASHBOARD_ID_FIELD] = self._dashboard.id
filter_set = FilterSetDAO.create(self._properties, commit=True)
return filter_set
def validate(self) -> None:
self._validate_filterset_dashboard_exists()
if self._properties[OWNER_TYPE_FIELD] == DASHBOARD_OWNER_TYPE:
self._validate_owner_id_is_dashboard_id()
self._validate_user_is_the_dashboard_owner()
else:
self._validate_owner_id_exists()
def _validate_owner_id_exists(self) -> None:
owner_id = self._properties[OWNER_ID_FIELD]
if not (g.user.id == owner_id or security_manager.get_user_by_id(owner_id)):
raise FilterSetCreateFailedError(
str(self._dashboard_id), "owner_id does not exists"
)
def _validate_user_is_the_dashboard_owner(self) -> None:
if not self.is_user_dashboard_owner():
raise UserIsNotDashboardOwnerError(str(self._dashboard_id))
def _validate_owner_id_is_dashboard_id(self) -> None:
if (
self._properties.get(OWNER_ID_FIELD, self._dashboard_id)
!= self._dashboard_id
):
raise DashboardIdInconsistencyError(str(self._dashboard_id))
|
src/genie/libs/parser/iosxe/tests/ShowWirelessMobilitySummary/cli/equal/golden_output2_expected.py | balmasea/genieparser | 204 | 12689655 |
expected_output = {
"controller_config": {
"group_name": "default",
"ipv4": "10.9.3.4",
"mac_address": "AAAA.BBFF.8888",
"multicast_ipv4": "0.0.0.0",
"multicast_ipv6": "::",
"pmtu": "N/A",
"public_ip": "N/A",
"status": "N/A",
},
"mobility_summary": {
"domain_id": "0x34ac",
"dscp_value": "48",
"group_name": "default",
"keepalive": "10/3",
"mac_addr": "687d.b4ff.b9e9",
"mgmt_ipv4": "10.20.30.40",
"mgmt_ipv6": "",
"mgmt_vlan": "143",
"multi_ipv4": "0.0.0.0",
"multi_ipv6": "::",
},
} |
tutorials/eboutique/microservices/product/src/commands/__init__.py | bhardwajRahul/minos-python | 247 | 12689673 |
from .services import (
ProductCommandService,
)
|
chitra/import_utils.py | aniketmaurya/Chitra | 158 | 12689693 | import importlib.util
def is_installed(module_name: str):
return importlib.util.find_spec(module_name) is not None
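# Illustrative usage sketch (not part of the original module):
#
#     if is_installed("torch"):
#         import torch  # safe to import: a module spec was found
#     else:
#         raise ImportError("install torch to use this feature")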
|
reinforcement/tensorflow/minigo/tests/test_go.py | mengkai94/training | 3,475 | 12689733 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import coords
from go import Position, PlayerMove, LibertyTracker, WHITE, BLACK
import go
import sgf_wrapper
from tests import test_utils
EMPTY_ROW = '.' * go.N + '\n'
TEST_BOARD = test_utils.load_board('''
.X.....OO
X........
''' + EMPTY_ROW * 7)
NO_HANDICAP_SGF = "(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]HA[0]RE[W+1.5]GM[1];B[fd];W[cf];B[eg];W[dd];B[dc];W[cc];B[de];W[cd];B[ed];W[he];B[ce];W[be];B[df];W[bf];B[hd];W[ge];B[gd];W[gg];B[db];W[cb];B[cg];W[bg];B[gh];W[fh];B[hh];W[fg];B[eh];W[ei];B[di];W[fi];B[hg];W[dh];B[ch];W[ci];B[bh];W[ff];B[fe];W[hf];B[id];W[bi];B[ah];W[ef];B[dg];W[ee];B[di];W[ig];B[ai];W[ih];B[fb];W[hi];B[ag];W[ab];B[bd];W[bc];B[ae];W[ad];B[af];W[bd];B[ca];W[ba];B[da];W[ie])"
def coords_from_gtp_set(string):
return frozenset(map(coords.from_gtp, string.split()))
class TestBasicFunctions(test_utils.MinigoUnitTest):
def test_load_board(self):
self.assertEqualNPArray(go.EMPTY_BOARD, np.zeros([go.N, go.N]))
self.assertEqualNPArray(
go.EMPTY_BOARD, test_utils.load_board('. \n' * go.N ** 2))
def test_neighbors(self):
corner = coords.from_gtp('A1')
neighbors = [go.EMPTY_BOARD[c] for c in go.NEIGHBORS[corner]]
self.assertEqual(len(neighbors), 2)
side = coords.from_gtp('A2')
side_neighbors = [go.EMPTY_BOARD[c] for c in go.NEIGHBORS[side]]
self.assertEqual(len(side_neighbors), 3)
def test_is_koish(self):
self.assertEqual(go.is_koish(
TEST_BOARD, coords.from_gtp('A9')), BLACK)
self.assertEqual(go.is_koish(TEST_BOARD, coords.from_gtp('B8')), None)
self.assertEqual(go.is_koish(TEST_BOARD, coords.from_gtp('B9')), None)
self.assertEqual(go.is_koish(TEST_BOARD, coords.from_gtp('E5')), None)
def test_is_eyeish(self):
board = test_utils.load_board('''
.XX...XXX
X.X...X.X
XX.....X.
........X
XXXX.....
OOOX....O
X.OXX.OO.
.XO.X.O.O
XXO.X.OO.
''')
B_eyes = coords_from_gtp_set('A2 A9 B8 J7 H8')
W_eyes = coords_from_gtp_set('H2 J1 J3')
not_eyes = coords_from_gtp_set('B3 E5')
for be in B_eyes:
self.assertEqual(go.is_eyeish(board, be), BLACK, str(be))
for we in W_eyes:
self.assertEqual(go.is_eyeish(board, we), WHITE, str(we))
for ne in not_eyes:
self.assertEqual(go.is_eyeish(board, ne), None, str(ne))
class TestLibertyTracker(test_utils.MinigoUnitTest):
def test_lib_tracker_init(self):
board = test_utils.load_board('X........' + EMPTY_ROW * 8)
lib_tracker = LibertyTracker.from_board(board)
self.assertEqual(len(lib_tracker.groups), 1)
self.assertNotEqual(
lib_tracker.group_index[coords.from_gtp('A9')], go.MISSING_GROUP_ID)
self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')], 2)
sole_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'A9')]]
self.assertEqual(sole_group.stones, coords_from_gtp_set('A9'))
self.assertEqual(sole_group.liberties, coords_from_gtp_set('B9 A8'))
self.assertEqual(sole_group.color, BLACK)
def test_place_stone(self):
board = test_utils.load_board('X........' + EMPTY_ROW * 8)
lib_tracker = LibertyTracker.from_board(board)
lib_tracker.add_stone(BLACK, coords.from_gtp('B9'))
self.assertEqual(len(lib_tracker.groups), 1)
self.assertNotEqual(
lib_tracker.group_index[coords.from_gtp('A9')], go.MISSING_GROUP_ID)
self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')], 3)
self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('B9')], 3)
sole_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'A9')]]
self.assertEqual(sole_group.stones, coords_from_gtp_set('A9 B9'))
self.assertEqual(sole_group.liberties,
coords_from_gtp_set('C9 A8 B8'))
self.assertEqual(sole_group.color, BLACK)
def test_place_stone_opposite_color(self):
board = test_utils.load_board('X........' + EMPTY_ROW * 8)
lib_tracker = LibertyTracker.from_board(board)
lib_tracker.add_stone(WHITE, coords.from_gtp('B9'))
self.assertEqual(len(lib_tracker.groups), 2)
self.assertNotEqual(
lib_tracker.group_index[coords.from_gtp('A9')], go.MISSING_GROUP_ID)
self.assertNotEqual(
lib_tracker.group_index[coords.from_gtp('B9')], go.MISSING_GROUP_ID)
self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')], 1)
self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('B9')], 2)
black_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'A9')]]
white_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'B9')]]
self.assertEqual(black_group.stones, coords_from_gtp_set('A9'))
self.assertEqual(black_group.liberties, coords_from_gtp_set('A8'))
self.assertEqual(black_group.color, BLACK)
self.assertEqual(white_group.stones, coords_from_gtp_set('B9'))
self.assertEqual(white_group.liberties, coords_from_gtp_set('C9 B8'))
self.assertEqual(white_group.color, WHITE)
def test_merge_multiple_groups(self):
board = test_utils.load_board('''
.X.......
X.X......
.X.......
''' + EMPTY_ROW * 6)
lib_tracker = LibertyTracker.from_board(board)
lib_tracker.add_stone(BLACK, coords.from_gtp('B8'))
self.assertEqual(len(lib_tracker.groups), 1)
self.assertNotEqual(
lib_tracker.group_index[coords.from_gtp('B8')], go.MISSING_GROUP_ID)
sole_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'B8')]]
self.assertEqual(sole_group.stones,
coords_from_gtp_set('B9 A8 B8 C8 B7'))
self.assertEqual(sole_group.liberties,
coords_from_gtp_set('A9 C9 D8 A7 C7 B6'))
self.assertEqual(sole_group.color, BLACK)
liberty_cache = lib_tracker.liberty_cache
for stone in sole_group.stones:
self.assertEqual(liberty_cache[stone], 6, str(stone))
def test_capture_stone(self):
board = test_utils.load_board('''
.X.......
XO.......
.X.......
''' + EMPTY_ROW * 6)
lib_tracker = LibertyTracker.from_board(board)
captured = lib_tracker.add_stone(BLACK, coords.from_gtp('C8'))
self.assertEqual(len(lib_tracker.groups), 4)
self.assertEqual(
lib_tracker.group_index[coords.from_gtp('B8')], go.MISSING_GROUP_ID)
self.assertEqual(captured, coords_from_gtp_set('B8'))
def test_capture_many(self):
board = test_utils.load_board('''
.XX......
XOO......
.XX......
''' + EMPTY_ROW * 6)
lib_tracker = LibertyTracker.from_board(board)
captured = lib_tracker.add_stone(BLACK, coords.from_gtp('D8'))
self.assertEqual(len(lib_tracker.groups), 4)
self.assertEqual(
lib_tracker.group_index[coords.from_gtp('B8')], go.MISSING_GROUP_ID)
self.assertEqual(captured, coords_from_gtp_set('B8 C8'))
left_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'A8')]]
self.assertEqual(left_group.stones, coords_from_gtp_set('A8'))
self.assertEqual(left_group.liberties,
coords_from_gtp_set('A9 B8 A7'))
right_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'D8')]]
self.assertEqual(right_group.stones, coords_from_gtp_set('D8'))
self.assertEqual(right_group.liberties,
coords_from_gtp_set('D9 C8 E8 D7'))
top_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'B9')]]
self.assertEqual(top_group.stones, coords_from_gtp_set('B9 C9'))
self.assertEqual(top_group.liberties,
coords_from_gtp_set('A9 D9 B8 C8'))
bottom_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'B7')]]
self.assertEqual(bottom_group.stones, coords_from_gtp_set('B7 C7'))
self.assertEqual(bottom_group.liberties,
coords_from_gtp_set('B8 C8 A7 D7 B6 C6'))
liberty_cache = lib_tracker.liberty_cache
for stone in top_group.stones:
self.assertEqual(liberty_cache[stone], 4, str(stone))
for stone in left_group.stones:
self.assertEqual(liberty_cache[stone], 3, str(stone))
for stone in right_group.stones:
self.assertEqual(liberty_cache[stone], 4, str(stone))
for stone in bottom_group.stones:
self.assertEqual(liberty_cache[stone], 6, str(stone))
for stone in captured:
self.assertEqual(liberty_cache[stone], 0, str(stone))
def test_capture_multiple_groups(self):
board = test_utils.load_board('''
.OX......
OXX......
XX.......
''' + EMPTY_ROW * 6)
lib_tracker = LibertyTracker.from_board(board)
captured = lib_tracker.add_stone(BLACK, coords.from_gtp('A9'))
self.assertEqual(len(lib_tracker.groups), 2)
self.assertEqual(captured, coords_from_gtp_set('B9 A8'))
corner_stone = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'A9')]]
self.assertEqual(corner_stone.stones, coords_from_gtp_set('A9'))
self.assertEqual(corner_stone.liberties, coords_from_gtp_set('B9 A8'))
surrounding_stones = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'C9')]]
self.assertEqual(surrounding_stones.stones,
coords_from_gtp_set('C9 B8 C8 A7 B7'))
self.assertEqual(surrounding_stones.liberties,
coords_from_gtp_set('B9 D9 A8 D8 C7 A6 B6'))
liberty_cache = lib_tracker.liberty_cache
for stone in corner_stone.stones:
self.assertEqual(liberty_cache[stone], 2, str(stone))
for stone in surrounding_stones.stones:
self.assertEqual(liberty_cache[stone], 7, str(stone))
def test_same_friendly_group_neighboring_twice(self):
board = test_utils.load_board('''
XX.......
X........
''' + EMPTY_ROW * 7)
lib_tracker = LibertyTracker.from_board(board)
captured = lib_tracker.add_stone(BLACK, coords.from_gtp('B8'))
self.assertEqual(len(lib_tracker.groups), 1)
sole_group_id = lib_tracker.group_index[coords.from_gtp('A9')]
sole_group = lib_tracker.groups[sole_group_id]
self.assertEqual(sole_group.stones,
coords_from_gtp_set('A9 B9 A8 B8'))
self.assertEqual(sole_group.liberties,
coords_from_gtp_set('C9 C8 A7 B7'))
self.assertEqual(captured, set())
def test_same_opponent_group_neighboring_twice(self):
board = test_utils.load_board('''
XX.......
X........
''' + EMPTY_ROW * 7)
lib_tracker = LibertyTracker.from_board(board)
captured = lib_tracker.add_stone(WHITE, coords.from_gtp('B8'))
self.assertEqual(len(lib_tracker.groups), 2)
black_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'A9')]]
self.assertEqual(black_group.stones, coords_from_gtp_set('A9 B9 A8'))
self.assertEqual(black_group.liberties, coords_from_gtp_set('C9 A7'))
white_group = lib_tracker.groups[lib_tracker.group_index[coords.from_gtp(
'B8')]]
self.assertEqual(white_group.stones, coords_from_gtp_set('B8'))
self.assertEqual(white_group.liberties, coords_from_gtp_set('C8 B7'))
self.assertEqual(captured, set())
class TestPosition(test_utils.MinigoUnitTest):
def test_passing(self):
start_position = Position(
board=TEST_BOARD,
n=0,
komi=6.5,
caps=(1, 2),
ko=coords.from_gtp('A1'),
recent=tuple(),
to_play=BLACK,
)
expected_position = Position(
board=TEST_BOARD,
n=1,
komi=6.5,
caps=(1, 2),
ko=None,
recent=(PlayerMove(BLACK, None),),
to_play=WHITE,
)
pass_position = start_position.pass_move()
self.assertEqualPositions(pass_position, expected_position)
def test_flipturn(self):
start_position = Position(
board=TEST_BOARD,
n=0,
komi=6.5,
caps=(1, 2),
ko=coords.from_gtp('A1'),
recent=tuple(),
to_play=BLACK,
)
expected_position = Position(
board=TEST_BOARD,
n=0,
komi=6.5,
caps=(1, 2),
ko=None,
recent=tuple(),
to_play=WHITE,
)
flip_position = start_position.flip_playerturn()
self.assertEqualPositions(flip_position, expected_position)
def test_is_move_suicidal(self):
board = test_utils.load_board('''
...O.O...
....O....
XO.....O.
OXO...OXO
O.XO.OX.O
OXO...OOX
XO.......
......XXO
.....XOO.
''')
position = Position(
board=board,
to_play=BLACK,
)
suicidal_moves = coords_from_gtp_set('E9 H5')
nonsuicidal_moves = coords_from_gtp_set('B5 J1 A9')
for move in suicidal_moves:
# sanity check my coordinate input
self.assertEqual(position.board[move], go.EMPTY)
self.assertTrue(position.is_move_suicidal(move), str(move))
for move in nonsuicidal_moves:
# sanity check my coordinate input
self.assertEqual(position.board[move], go.EMPTY)
self.assertFalse(position.is_move_suicidal(move), str(move))
def test_legal_moves(self):
board = test_utils.load_board('''
.O.O.XOX.
O..OOOOOX
......O.O
OO.....OX
XO.....X.
.O.......
OX.....OO
XX...OOOX
.....O.X.
''')
position = Position(board=board, to_play=BLACK)
illegal_moves = coords_from_gtp_set('A9 E9 J9')
legal_moves = coords_from_gtp_set('A4 G1 J1 H7') | {None}
for move in illegal_moves:
with self.subTest(type='illegal', move=move):
self.assertFalse(position.is_move_legal(move))
for move in legal_moves:
with self.subTest(type='legal', move=move):
self.assertTrue(position.is_move_legal(move))
# check that the bulk legal test agrees with move-by-move illegal test.
bulk_legality = position.all_legal_moves()
for i, bulk_legal in enumerate(bulk_legality):
with self.subTest(type='bulk', move=coords.from_flat(i)):
self.assertEqual(
bulk_legal, position.is_move_legal(coords.from_flat(i)))
# flip the colors and check that everything is still (il)legal
position = Position(board=-board, to_play=WHITE)
for move in illegal_moves:
with self.subTest(type='illegal', move=move):
self.assertFalse(position.is_move_legal(move))
for move in legal_moves:
with self.subTest(type='legal', move=move):
self.assertTrue(position.is_move_legal(move))
bulk_legality = position.all_legal_moves()
for i, bulk_legal in enumerate(bulk_legality):
with self.subTest(type='bulk', move=coords.from_flat(i)):
self.assertEqual(
bulk_legal, position.is_move_legal(coords.from_flat(i)))
def test_move(self):
start_position = Position(
board=TEST_BOARD,
n=0,
komi=6.5,
caps=(1, 2),
ko=None,
recent=tuple(),
to_play=BLACK,
)
expected_board = test_utils.load_board('''
.XX....OO
X........
''' + EMPTY_ROW * 7)
expected_position = Position(
board=expected_board,
n=1,
komi=6.5,
caps=(1, 2),
ko=None,
recent=(PlayerMove(BLACK, coords.from_gtp('C9')),),
to_play=WHITE,
)
actual_position = start_position.play_move(coords.from_gtp('C9'))
self.assertEqualPositions(actual_position, expected_position)
expected_board2 = test_utils.load_board('''
.XX....OO
X.......O
''' + EMPTY_ROW * 7)
expected_position2 = Position(
board=expected_board2,
n=2,
komi=6.5,
caps=(1, 2),
ko=None,
recent=(PlayerMove(BLACK, coords.from_gtp('C9')),
PlayerMove(WHITE, coords.from_gtp('J8'))),
to_play=BLACK,
)
actual_position2 = actual_position.play_move(coords.from_gtp('J8'))
self.assertEqualPositions(actual_position2, expected_position2)
def test_move_with_capture(self):
start_board = test_utils.load_board(EMPTY_ROW * 5 + '''
XXXX.....
XOOX.....
O.OX.....
OOXX.....
''')
start_position = Position(
board=start_board,
n=0,
komi=6.5,
caps=(1, 2),
ko=None,
recent=tuple(),
to_play=BLACK,
)
expected_board = test_utils.load_board(EMPTY_ROW * 5 + '''
XXXX.....
X..X.....
.X.X.....
..XX.....
''')
expected_position = Position(
board=expected_board,
n=1,
komi=6.5,
caps=(7, 2),
ko=None,
recent=(PlayerMove(BLACK, coords.from_gtp('B2')),),
to_play=WHITE,
)
actual_position = start_position.play_move(coords.from_gtp('B2'))
self.assertEqualPositions(actual_position, expected_position)
def test_ko_move(self):
start_board = test_utils.load_board('''
.OX......
OX.......
''' + EMPTY_ROW * 7)
start_position = Position(
board=start_board,
n=0,
komi=6.5,
caps=(1, 2),
ko=None,
recent=tuple(),
to_play=BLACK,
)
expected_board = test_utils.load_board('''
X.X......
OX.......
''' + EMPTY_ROW * 7)
expected_position = Position(
board=expected_board,
n=1,
komi=6.5,
caps=(2, 2),
ko=coords.from_gtp('B9'),
recent=(PlayerMove(BLACK, coords.from_gtp('A9')),),
to_play=WHITE,
)
actual_position = start_position.play_move(coords.from_gtp('A9'))
self.assertEqualPositions(actual_position, expected_position)
# Check that retaking ko is illegal until two intervening moves
with self.assertRaises(go.IllegalMove):
actual_position.play_move(coords.from_gtp('B9'))
pass_twice = actual_position.pass_move().pass_move()
ko_delayed_retake = pass_twice.play_move(coords.from_gtp('B9'))
expected_position = Position(
board=start_board,
n=4,
komi=6.5,
caps=(2, 3),
ko=coords.from_gtp('A9'),
recent=(
PlayerMove(BLACK, coords.from_gtp('A9')),
PlayerMove(WHITE, None),
PlayerMove(BLACK, None),
PlayerMove(WHITE, coords.from_gtp('B9'))),
to_play=BLACK,
)
self.assertEqualPositions(ko_delayed_retake, expected_position)
def test_is_game_over(self):
root = go.Position()
self.assertFalse(root.is_game_over())
first_pass = root.play_move(None)
self.assertFalse(first_pass.is_game_over())
second_pass = first_pass.play_move(None)
self.assertTrue(second_pass.is_game_over())
def test_scoring(self):
board = test_utils.load_board('''
.XX......
OOXX.....
OOOX...X.
OXX......
OOXXXXXX.
OOOXOXOXX
.O.OOXOOX
.O.O.OOXX
......OOO
''')
position = Position(
board=board,
n=54,
komi=6.5,
caps=(2, 5),
ko=None,
recent=tuple(),
to_play=BLACK,
)
expected_score = 1.5
self.assertEqual(position.score(), expected_score)
board = test_utils.load_board('''
XXX......
OOXX.....
OOOX...X.
OXX......
OOXXXXXX.
OOOXOXOXX
.O.OOXOOX
.O.O.OOXX
......OOO
''')
position = Position(
board=board,
n=55,
komi=6.5,
caps=(2, 5),
ko=None,
recent=tuple(),
to_play=WHITE,
)
expected_score = 2.5
self.assertEqual(position.score(), expected_score)
def test_replay_position(self):
sgf_positions = list(sgf_wrapper.replay_sgf(NO_HANDICAP_SGF))
initial = sgf_positions[0]
self.assertEqual(initial.result, go.WHITE)
final = sgf_positions[-1].position.play_move(
sgf_positions[-1].next_move)
# sanity check to ensure we're working with the right position
final_board = test_utils.load_board('''
.OXX.....
O.OX.X...
.OOX.....
OOOOXXXXX
XOXXOXOOO
XOOXOO.O.
XOXXXOOXO
XXX.XOXXO
X..XOO.O.
''')
expected_final_position = go.Position(
final_board,
n=62,
komi=6.5,
caps=(3, 2),
ko=None,
recent=tuple(),
to_play=go.BLACK
)
self.assertEqualPositions(expected_final_position, final)
self.assertEqual(final.n, len(final.recent))
replayed_positions = list(go.replay_position(final, 1))
for sgf_pos, replay_pos in zip(sgf_positions, replayed_positions):
self.assertEqualPositions(sgf_pos.position, replay_pos.position)
|
panel/tests/template/test_vanilla_manual.py | datalayer-contrib/holoviz-panel | 1,130 | 12689744 | import panel as pn
import numpy as np
import holoviews as hv
LOGO = "https://panel.holoviz.org/_static/logo_horizontal.png"
def test_vanilla_with_sidebar():
"""Returns an app that uses the vanilla template in various ways.
Inspect the app and verify that the issues of [Issue 1641]\
(https://github.com/holoviz/panel/issues/1641) have been solved
- Navbar is "sticky"/ fixed to the top
- Navbar supports adding header items to left, center and right
- There is a nice padding/ margin everywhere
- Independent scroll for sidebar and main
- Only vertical scrollbars
"""
vanilla = pn.template.VanillaTemplate(
title="Vanilla Template",
logo=LOGO,
)
xs = np.linspace(0, np.pi)
freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2)
phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi)
@pn.depends(freq=freq, phase=phase)
def sine(freq, phase):
return hv.Curve((xs, np.sin(xs * freq + phase))).opts(responsive=True, min_height=400)
@pn.depends(freq=freq, phase=phase)
def cosine(freq, phase):
return hv.Curve((xs, np.cos(xs * freq + phase))).opts(responsive=True, min_height=400)
vanilla.sidebar.append(freq)
vanilla.sidebar.append(phase)
vanilla.sidebar.append(pn.pane.Markdown(test_vanilla_with_sidebar.__doc__))
vanilla.sidebar.append(pn.pane.Markdown("## Sidebar Item\n" * 50))
vanilla.main.append(
pn.Row(
pn.Card(hv.DynamicMap(sine), title="Sine"),
pn.Card(hv.DynamicMap(cosine), title="Cosine"),
)
)
vanilla.main.append(
pn.Row(
pn.Card(hv.DynamicMap(sine), title="Sine"),
pn.Card(hv.DynamicMap(cosine), title="Cosine"),
)
)
vanilla.header[:] = [
pn.Row(
pn.widgets.Button(name="Left", sizing_mode="fixed", width=50),
pn.layout.HSpacer(),
pn.widgets.Button(name="Center", sizing_mode="fixed", width=50),
pn.layout.HSpacer(),
pn.widgets.Button(name="Right", sizing_mode="fixed", width=50),
)
]
vanilla.main_max_width = "600px"
return vanilla
def test_vanilla_with_no_sidebar():
"""Returns an app that uses the vanilla template in various ways.
Inspect the app and verify that the issues of [Issue 1641]\
(https://github.com/holoviz/panel/issues/1641) have been solved
- Navbar is "sticky"/ fixed to the top
- Navbar supports adding header items to left, center and right
- There is a nice padding/ margin everywhere
- Independent scroll for sidebar and main
- Only vertical scrollbars
"""
vanilla = pn.template.VanillaTemplate(
title="Vanilla Template",
logo=LOGO,
favicon="https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel/2781d86d4ed141889d633748879a120d7d8e777a/assets/images/favicon.ico",
)
xs = np.linspace(0, np.pi)
freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2)
phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi)
@pn.depends(freq=freq, phase=phase)
def sine(freq, phase):
return hv.Curve((xs, np.sin(xs * freq + phase))).opts(responsive=True, min_height=400)
@pn.depends(freq=freq, phase=phase)
def cosine(freq, phase):
return hv.Curve((xs, np.cos(xs * freq + phase))).opts(responsive=True, min_height=400)
vanilla.main.append(freq)
vanilla.main.append(phase)
vanilla.main.append(pn.pane.Markdown(test_vanilla_with_no_sidebar.__doc__))
vanilla.main.append(
pn.Row(
pn.Card(hv.DynamicMap(sine), title="Sine"),
pn.Card(hv.DynamicMap(cosine), title="Cosine"),
)
)
vanilla.main.append(
pn.Row(
pn.Card(hv.DynamicMap(sine), title="Sine"),
pn.Card(hv.DynamicMap(cosine), title="Cosine"),
)
)
vanilla.header[:] = [
pn.Row(
pn.widgets.Button(name="Left", sizing_mode="fixed", width=50),
pn.layout.HSpacer(),
pn.widgets.Button(name="Center", sizing_mode="fixed", width=50),
pn.layout.HSpacer(),
pn.widgets.Button(name="Right", sizing_mode="fixed", width=50),
)
]
vanilla.main_max_width = "600px"
return vanilla
if __name__.startswith("bokeh"):
pn.extension(sizing_mode="stretch_width")
test_vanilla_with_sidebar().servable()
# test_vanilla_with_no_sidebar().servable()
|
timesynth/signals/car.py | swight-prc/TimeSynth | 242 | 12689761 | <reponame>swight-prc/TimeSynth
import numpy as np
from .base_signal import BaseSignal
__all__ = ['CAR']
class CAR(BaseSignal):
"""Signal generatpr for continuously autoregressive (CAR) signals.
Parameters
----------
ar_param : number (default 1.0)
Parameter of the AR(1) process
    sigma : number (default 0.5)
        Standard deviation of the signal
    start_value : number (default 0.01)
        Starting value of the AR process
"""
def __init__(self, ar_param=1.0, sigma=0.5, start_value=0.01):
self.vectorizable = False
self.ar_param = ar_param
self.sigma = sigma
self.start_value = start_value
self.previous_value = None
self.previous_time = None
def sample_next(self, time, samples, errors):
"""Sample a single time point
Parameters
----------
time : number
Time at which a sample was required
Returns
-------
float
sampled signal for time t
"""
if self.previous_value is None:
output = self.start_value
else:
time_diff = time - self.previous_time
noise = np.random.normal(loc=0.0, scale=1.0, size=1)
output = (np.power(self.ar_param, time_diff))*self.previous_value+\
self.sigma*np.sqrt(1-np.power(self.ar_param, time_diff))*noise
self.previous_time = time
self.previous_value = output
return output
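if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): draw a CAR(1) series at
    # irregular, strictly increasing time points. TimeSynth's TimeSampler and
    # TimeSeries wrappers are the usual entry point and are not shown here.
    # Run as a module (python -m timesynth.signals.car) so the relative import
    # above resolves.
    car = CAR(ar_param=0.9, sigma=0.5, start_value=0.01)
    times = np.cumsum(np.random.exponential(scale=1.0, size=10))
    values = [np.asarray(car.sample_next(t, None, None)).item() for t in times]
    print(list(zip(times.round(3).tolist(), values)))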
|
crabageprediction/venv/Lib/site-packages/fontTools/ttLib/tables/_h_e_a_d.py | 13rianlucero/CrabAgePrediction | 38,667 | 12689816 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
log = logging.getLogger(__name__)
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
dependencies = ['maxp', 'loca', 'CFF ', 'CFF2']
def decompile(self, data, ttFont):
dummy, rest = sstruct.unpack2(headFormat, data, self)
if rest:
# this is quite illegal, but there seem to be fonts out there that do this
log.warning("extra bytes at the end of 'head' table")
assert rest == b"\0\0"
# For timestamp fields, ignore the top four bytes. Some fonts have
# bogus values there. Since till 2038 those bytes only can be zero,
# ignore them.
#
# https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
for stamp in 'created', 'modified':
value = getattr(self, stamp)
if value > 0xFFFFFFFF:
log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
value &= 0xFFFFFFFF
setattr(self, stamp, value)
if value < 0x7C259DC0: # January 1, 1970 00:00:00
log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp)
value += 0x7C259DC0
setattr(self, stamp, value)
def compile(self, ttFont):
if ttFont.recalcBBoxes:
# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
if 'CFF ' in ttFont:
topDict = ttFont['CFF '].cff.topDictIndex[0]
self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
elif 'CFF2' in ttFont:
topDict = ttFont['CFF2'].cff.topDictIndex[0]
charStrings = topDict.CharStrings
fontBBox = None
for charString in charStrings.values():
bounds = charString.calcBounds(charStrings)
if bounds is not None:
if fontBBox is not None:
fontBBox = unionRect(fontBBox, bounds)
else:
fontBBox = bounds
if fontBBox is not None:
self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
if ttFont.recalcTimestamp:
self.modified = timestampNow()
data = sstruct.pack(headFormat, self)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
_, names, fixes = sstruct.getformat(headFormat)
for name in names:
value = getattr(self, name)
if name in fixes:
value = floatToFixedToStr(value, precisionBits=fixes[name])
elif name in ("created", "modified"):
value = timestampToString(value)
elif name in ("magicNumber", "checkSumAdjustment"):
if value < 0:
value = value + 0x100000000
value = hex(value)
if value[-1:] == "L":
value = value[:-1]
elif name in ("macStyle", "flags"):
value = num2binary(value, 16)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
value = attrs["value"]
fixes = sstruct.getformat(headFormat)[2]
if name in fixes:
value = strToFixedToFloat(value, precisionBits=fixes[name])
elif name in ("created", "modified"):
value = timestampFromString(value)
elif name in ("macStyle", "flags"):
value = binary2num(value)
else:
value = safeEval(value)
setattr(self, name, value)
|
ibis/backends/impala/tests/test_connection_pool.py | GrapeBaBa/ibis | 986 | 12689821 | import ibis
def test_connection_pool_size(hdfs, env, test_data_db):
client = ibis.impala.connect(
port=env.impala_port,
hdfs_client=hdfs,
host=env.impala_host,
database=test_data_db,
)
# the client cursor may or may not be GC'd, so the connection
# pool will contain either zero or one cursor
assert len(client.con.connection_pool) in (0, 1)
def test_connection_pool_size_after_close(hdfs, env, test_data_db):
client = ibis.impala.connect(
port=env.impala_port,
hdfs_client=hdfs,
host=env.impala_host,
database=test_data_db,
)
client.close()
assert not client.con.connection_pool
|
test/data/70.py | suliveevil/vista.vim | 1,764 | 12689852 | <filename>test/data/70.py
class Foo:
class Bar:
def baz(self):
pass
|
test/examples/simple/registers/integration/common/regmodel.py | rodrigomelo9/uvm-python | 140 | 12689855 | <gh_stars>100-1000
#//
#//------------------------------------------------------------------------------
#// Copyright 2011 Mentor Graphics Corporation
#// Copyright 2011 Cadence Design Systems, Inc.
#// Copyright 2011 Synopsys, Inc.
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
from uvm.reg import UVMReg, UVMRegField
from uvm.base import sv
from uvm.macros import uvm_object_utils
from uvm.reg.uvm_mem import UVMMem
from uvm.reg.uvm_reg_block import UVMRegBlock
from uvm.reg.uvm_reg_model import UVM_NO_COVERAGE, UVM_LITTLE_ENDIAN
class dut_ID(UVMReg):
def __init__(self, name="dut_ID"):
super().__init__(name,32, UVM_NO_COVERAGE)
self.REVISION_ID = None
self.CHIP_ID = None
self.PRODUCT_ID = None
def build(self):
self.REVISION_ID = UVMRegField.type_id.create("REVISION_ID")
self.CHIP_ID = UVMRegField.type_id.create("CHIP_ID")
self.PRODUCT_ID = UVMRegField.type_id.create("PRODUCT_ID")
self.REVISION_ID.configure(self, 8, 0, "RO", 0, 0x03, 1, 0, 1)
self.CHIP_ID.configure(self, 8, 8, "RO", 0, 0x5A, 1, 0, 1)
self.PRODUCT_ID.configure(self, 10, 16,"RO", 0, 0x176, 1, 0, 1)
uvm_object_utils(dut_ID)
class dut_DATA(UVMReg):
def __init__(self, name="dut_DATA"):
super().__init__(name,32, UVM_NO_COVERAGE)
self.value = None
def build(self):
self.value = UVMRegField.type_id.create("value")
self.value.configure(self, 32, 0, "RW", 1, 0x0, 1, 0, 1)
uvm_object_utils(dut_DATA)
class dut_SOCKET(UVMReg):
# rand UVMRegField IP
# rand UVMRegField PORT
def __init__(self, name="dut_ADDR"):
super().__init__(name, 64, UVM_NO_COVERAGE)
self.IP = None
self.PORT = None
def build(self):
self.IP = UVMRegField.type_id.create("value")
self.PORT = UVMRegField.type_id.create("value")
self.IP.configure(self, 48, 0, "RW", 0, 0x0, 1, 0, 1)
self.PORT.configure(self, 16, 48, "RW", 0, 0x0, 1, 0, 1)
self.rand('IP')
self.rand('PORT')
uvm_object_utils(dut_SOCKET)
class dut_RAM(UVMMem):
def __init__(self, name="dut_RAM"):
super().__init__(name, 8, 32, "RW", UVM_NO_COVERAGE)
uvm_object_utils(dut_RAM)
class dut_regmodel(UVMRegBlock):
# rand dut_ID ID
# rand dut_DATA DATA
# rand dut_SOCKET SOCKET[256]
# rand dut_RAM RAM
def __init__(self, name="slave"):
super().__init__(name, UVM_NO_COVERAGE)
self.SOCKET = []
self.nsockets = 16
def build(self):
# create
self.ID = dut_ID.type_id.create("ID")
self.DATA = dut_DATA.type_id.create("DATA")
for i in range(self.nsockets):
socket = dut_SOCKET.type_id.create(sv.sformatf("SOCKET[%0d]", i))
self.SOCKET.append(socket)
self.RAM = dut_RAM.type_id.create("DMA_RAM")
# configure/build registers
self.ID.configure(self, None, "ID")
self.ID.build()
self.DATA.configure(self, None, "DATA")
self.DATA.build()
for i in range(len(self.SOCKET)):
self.SOCKET[i].configure(self, None, sv.sformatf("SOCKET[%0d]", i))
self.SOCKET[i].build()
self.RAM.configure(self, "DMA")
# define default map/add register to map
self.default_map = self.create_map("default_map", 0x0, 4, UVM_LITTLE_ENDIAN, 1)
self.default_map.add_reg(self.ID, 0x0, "RW")
self.default_map.add_reg(self.DATA, 0x24, "RW")
for i in range(len(self.SOCKET)):
self.default_map.add_reg(self.SOCKET[i], 0x1000 + 16 * i, "RW")
self.default_map.add_mem(self.RAM, 0x2000, "RW")
uvm_object_utils(dut_regmodel)
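# Illustrative usage (the sequencer and adapter below are placeholders provided
# by an enclosing UVM environment, not defined in this file):
#
#   regmodel = dut_regmodel.type_id.create("regmodel")
#   regmodel.build()
#   regmodel.lock_model()
#   regmodel.default_map.set_sequencer(bus_sequencer, reg_adapter)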
|
federatedml/optim/test/activation_test.py | fqiang/FATE | 3,787 | 12689869 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
import numpy as np
from federatedml.optim import activation
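# Reference sketch of a numerically stable sigmoid, included for illustration
# only; the implementation under test lives in federatedml.optim.activation and
# may differ in detail.
def _stable_sigmoid_reference(x):
    # Branch on the sign of x so np.exp never receives a large positive
    # argument and therefore cannot overflow.
    if x >= 0:
        return 1. / (1. + np.exp(-x))
    z = np.exp(x)
    return z / (1. + z)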
class TestConvergeFunction(unittest.TestCase):
def test_numeric_stability(self):
x_list = np.linspace(-709, 709, 10000)
# Original function
# a = 1. / (1. + np.exp(-x))
for x in x_list:
a1 = 1. / (1. + np.exp(-x))
a2 = activation.sigmoid(x)
self.assertTrue(np.abs(a1 - a2) < 1e-5)
if __name__ == '__main__':
unittest.main()
|
docs/conf.py | melonwater211/snorkel | 2,906 | 12689873 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import datetime
import os
import sys
import torch
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "Snorkel"
copyright = f"{datetime.datetime.now().year}, Snorkel Team"
author = "Snorkel Team"
master_doc = "index"
html_logo = "_static/octopus.png"
VERSION = {}
with open("../snorkel/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
# The full version, including alpha/beta/rc tags
release = VERSION["VERSION"]
# -- General configuration ---------------------------------------------------
# Mock imports for troublesome modules (i.e. any that use C code)
autosummary_mock_imports = ["dask", "pyspark", "spacy"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.linkcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
autosummary_generate = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {"navigation_depth": -1, "titles_only": True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for napoleon extension -------------------------------------------
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# -- Options for autodoc extension -------------------------------------------
# This value selects what content will be inserted into the main body of an autoclass
# directive
#
# http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#directive-autoclass
autoclass_content = "class"
# Default options to an ..autoXXX directive.
autodoc_default_options = {
"members": None,
"inherited-members": None,
"show-inheritance": None,
"special-members": "__call__",
}
# Subclasses should show parent classes docstrings if they don't override them.
autodoc_inherit_docstrings = True
# -- Options for linkcode extension ------------------------------------------
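# Illustrative mapping: info["module"] == "snorkel.labeling" (a single dot) links
# to .../tree/master/snorkel/labeling, while a deeper module name such as
# "snorkel.labeling.apply" (hypothetical) links to
# .../blob/master/snorkel/labeling/apply.py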
def linkcode_resolve(domain, info):
if domain != "py":
return None
if not info["module"]:
return None
module_path = info["module"].replace(".", "/")
# If only one `.`, assume it's a package
if info["module"].count(".") == 1:
return f"https://github.com/snorkel-team/snorkel/tree/master/{module_path}"
# Otherwise, it's a module
else:
return f"https://github.com/snorkel-team/snorkel/blob/master/{module_path}.py"
# -- Exclude PyTorch methods -------------------------------------------------
def skip_torch_module_member(app, what, name, obj, skip, options):
skip_torch = "Module." in str(obj) and name in dir(torch.nn.Module)
if name == "dump_patches": # Special handling for documented attrib
skip_torch = True
return skip or skip_torch
# -- Run setup ---------------------------------------------------------------
def setup(app):
app.connect("autodoc-skip-member", skip_torch_module_member)
|
src/mesh_nerf.py | qway/nerfmeshes | 113 | 12689904 | <gh_stars>100-1000
import argparse
import os
import numpy as np
import torch
import models
from importlib import import_module
from pytorch3d.structures import Meshes
from skimage import measure
from nerf.nerf_helpers import export_obj, batchify
from lightning_modules import PathParser
def create_mesh(vertices, faces_idx):
    # We scale-normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale
vertices = vertices - vertices.mean(0)
scale = max(vertices.abs().max(0)[0])
vertices = vertices / scale
# We construct a Meshes structure for the target mesh
target_mesh = Meshes(verts=[vertices], faces=[faces_idx])
return target_mesh
def extract_radiance(model, args, device, nums):
assert (isinstance(nums, tuple) or isinstance(nums, list) or isinstance(nums, int)), \
"Nums arg should be either iterable or int."
if isinstance(nums, int):
nums = (nums,) * 3
else:
assert (len(nums) == 3), "Nums arg should be of length 3, number of axes for 3D"
# Create sample tiles
tiles = [torch.linspace(-args.limit, args.limit, num) for num in nums]
# Generate 3D samples
samples = torch.stack(torch.meshgrid(*tiles), -1).view(-1, 3).float()
radiance_samples = []
for (samples,) in batchify(samples, batch_size=args.batch_size, device=device):
# Query radiance batch
radiance_batch = model.sample_points(samples, samples)
# Accumulate radiance
radiance_samples.append(radiance_batch.cpu())
# Radiance 3D grid (rgb + density)
radiance = torch.cat(radiance_samples, 0).view(*nums, 4).contiguous().numpy()
return radiance
def extract_iso_level(density, args):
# Density boundaries
min_a, max_a, std_a = density.min(), density.max(), density.std()
# Adaptive iso level
iso_value = min(max(args.iso_level, min_a + std_a), max_a - std_a)
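    # Worked example (illustrative numbers): with min=0, max=200, std=40 a
    # requested iso level of 32 is raised to 40, while 500 would be capped at 160.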
print(f"Min density {min_a}, Max density: {max_a}, Mean density {density.mean()}")
print(f"Querying based on iso level: {iso_value}")
return iso_value
def extract_geometry(model, device, args):
# Sample points based on the grid
radiance = extract_radiance(model, args, device, args.res)
# Density grid
density = radiance[..., 3]
# Adaptive iso level
iso_value = extract_iso_level(density, args)
# Extracting iso-surface triangulated
results = measure.marching_cubes(density, iso_value)
# Use contiguous tensors
vertices, triangles, normals, _ = [torch.from_numpy(np.ascontiguousarray(result)) for result in results]
# Use contiguous tensors
normals = torch.from_numpy(np.ascontiguousarray(normals))
vertices = torch.from_numpy(np.ascontiguousarray(vertices))
triangles = torch.from_numpy(np.ascontiguousarray(triangles))
# Normalize vertices, to the (-limit, limit)
vertices = args.limit * (vertices / (args.res / 2.) - 1.)
return vertices, triangles, normals, density
def extract_geometry_with_super_sampling(model, device, args):
raise NotImplementedError
try:
mcubes = import_module("marching_cubes")
except ModuleNotFoundError:
print("""
Run the following instructions within your environment:
https://github.com/JustusThies/PyMarchingCubes#installation
""")
# Close process
exit(-1)
# Sampling resolution per axis
nums = np.array([args.res + (args.res - 1) * args.super_sampling, args.res, args.res])
# Radiance per axis, super sampling across each axis
radiances = []
for i in range(0, 3):
# Roll such that each axis is rich
radiance_axis = extract_radiance(model, args, device, np.roll(nums, i))[..., 3]
radiances.append(radiance_axis)
# accumulate
density = np.stack(radiances, axis=0)
# Adaptive iso level
iso_value = extract_iso_level(density, args)
vertices, triangles = mcubes.marching_cubes_super_sampling(*radiances, iso_value)
vertices = np.ascontiguousarray(vertices)
mcubes.export_obj(vertices, triangles, os.path.join(args.save_dir, "mesh.obj"))
def export_marching_cubes(model, args, cfg, device):
# Mesh Extraction
if args.super_sampling >= 1:
print("Generating mesh geometry...")
# Extract model geometry with super sampling across each axis
extract_geometry_with_super_sampling(model, device, args)
return
# Cached mesh path containing data
mesh_cache_path = os.path.join(args.save_dir, args.cache_name)
cached_mesh_exists = os.path.exists(mesh_cache_path)
cache_new_mesh = args.use_cached_mesh and not cached_mesh_exists
if cache_new_mesh:
print(f"Cached mesh does not exist - {mesh_cache_path}")
if args.use_cached_mesh and cached_mesh_exists:
print("Loading cached mesh geometry...")
vertices, triangles, normals, density = torch.load(mesh_cache_path)
else:
print("Generating mesh geometry...")
# Extract model geometry
vertices, triangles, normals, density = extract_geometry(model, device, args)
if cache_new_mesh or args.override_cache_mesh:
torch.save((vertices, triangles, normals, density), mesh_cache_path)
print(f"Cached mesh geometry saved to {mesh_cache_path}")
# Extracting the mesh appearance
# Ray targets and directions
targets, directions = vertices, -normals
diffuse = []
if args.no_view_dependence:
print("Diffuse map query directly without specific-views...")
# Query directly without specific-views
batch_generator = batchify(targets, directions, batch_size=args.batch_size, device=device)
for (pos_batch, dir_batch) in batch_generator:
# Diffuse batch queried
diffuse_batch = model.sample_points(pos_batch, dir_batch)[..., :3]
# Accumulate diffuse
diffuse.append(diffuse_batch.cpu())
else:
print("Diffuse map query with view dependence...")
ray_bounds = torch.tensor([0., args.view_disparity_max_bound], dtype=directions.dtype)
# Query with view dependence
# Move ray origins slightly towards negative sdf
ray_origins = targets - args.view_disparity * directions
print("Started ray-casting")
batch_generator = batchify(ray_origins, directions, batch_size=args.batch_size, device=device)
for (ray_origins, ray_directions) in batch_generator:
# View dependent diffuse batch queried
output_bundle = model.query((ray_origins, ray_directions, ray_bounds))
# Accumulate diffuse
diffuse.append(output_bundle.rgb_map.cpu())
# Query the whole diffuse map
diffuse = torch.cat(diffuse, dim=0).numpy()
# Target mesh path
mesh_path = os.path.join(args.save_dir, args.mesh_name)
# Export model
export_obj(vertices, triangles, diffuse, normals, mesh_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--log-checkpoint", type=str, default=None,
help="Training log path with the config and checkpoints to load existent configuration.",
)
parser.add_argument(
"--checkpoint", type=str, default="model_last.ckpt",
help="Load existent configuration from the latest checkpoint by default.",
)
parser.add_argument(
"--save-dir", type=str, default=".",
help="Save mesh to this directory, if specified.",
)
parser.add_argument(
"--mesh-name", type=str, default="mesh.obj",
help="Mesh name to be generated.",
)
parser.add_argument(
"--iso-level", type=float, default=32,
help="Iso-level value for triangulation",
)
parser.add_argument(
"--limit", type=float, default=1.2,
help="Limits in -xyz to xyz for marching cubes 3D grid.",
)
parser.add_argument(
"--res", type=int, default=128,
help="Sampling resolution for marching cubes, increase it for higher level of detail.",
)
parser.add_argument(
"--super-sampling", type=int, default=0,
help="Add super sampling along the edges.",
)
parser.add_argument(
"--batch-size", type=int, default=1024,
help="Higher batch size results in faster processing but needs more device memory.",
)
parser.add_argument(
"--no-view-dependence", action="store_true", default=False,
help="Disable view dependent appearance, use sampled diffuse color based on the grid"
)
parser.add_argument(
"--view-disparity", type=float, default=1e-2,
help="Ray origins offset from target based on the inverse normal for the view dependent appearance.",
)
parser.add_argument(
"--view-disparity-max-bound", type=float, default=4e0,
help="Far max possible bound, usually set to (cfg.far - cfg.near), lower it for better "
"appearance estimation when using higher resolution e.g. at least view_disparity * 2.0.",
)
parser.add_argument(
"--use-cached-mesh", action="store_true", default=False,
help="Use the cached mesh.",
)
parser.add_argument(
"--override-cache-mesh", action="store_true", default=False,
help="Caches the mesh, useful for rapid configuration appearance tweaking.",
)
parser.add_argument(
"--cache-name", type=str, default="mesh_cache.pt",
help="Mesh cache name, allows for multiple unique meshes of different resolutions.",
)
config_args = parser.parse_args()
# Existent log path
path_parser = PathParser()
cfg, _ = path_parser.parse(None, config_args.log_checkpoint, None, config_args.checkpoint)
# Available device
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load model checkpoint
print(f"Loading model from {path_parser.checkpoint_path}")
model = getattr(models, cfg.experiment.model).load_from_checkpoint(path_parser.checkpoint_path)
model = model.eval().to(device)
with torch.no_grad():
# Perform marching cubes and export the mesh
export_marching_cubes(model, config_args, cfg, device)
|
tests/test_ami.py | ukwa/mrjob | 1,538 | 12689929 | <reponame>ukwa/mrjob
# -*- coding: utf-8 -*-
# Copyright 2018 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for AMI utilities"""
from mrjob.ami import describe_base_emr_images
from tests.mock_boto3 import MockBoto3TestCase
class DescribeBaseEMRImagesTestCase(MockBoto3TestCase):
# a valid base EMR image. we can make variants of this for testing
BASE_EMR_IMAGE = {
'Architecture': 'x86_64',
'BlockDeviceMappings': [
{
'DeviceName': '/dev/xvda',
'Ebs': {
'DeleteOnTermination': True,
'Encrypted': False,
'SnapshotId': 'snap-0ceb5dfba7c0cbd4c',
'VolumeSize': 8,
'VolumeType': 'standard'
}
}
],
'CreationDate': '2018-08-11T02:33:53.000Z',
'Description': 'Amazon Linux AMI 2018.03.0.20180811 x86_64 HVM EBS',
'EnaSupport': True,
'Hypervisor': 'xen',
'ImageId': 'ami-09c6e771',
'ImageLocation': 'amazon/amzn-ami-hvm-2018.03.0.20180811-x86_64-ebs',
'ImageOwnerAlias': 'amazon',
'ImageType': 'machine',
'Name': 'amzn-ami-hvm-2018.03.0.20180811-x86_64-ebs',
'OwnerId': '137112412989',
'Public': True,
'RootDeviceName': '/dev/xvda',
'RootDeviceType': 'ebs',
'SriovNetSupport': 'simple',
'State': 'available',
'VirtualizationType': 'hvm',
}
def make_image(self, **kwargs):
"""Return a copy of BASE_EMR_IMAGE with the given fields added.
You can blank out fields by setting them to None."""
image = dict(self.BASE_EMR_IMAGE, **kwargs)
return {k: v for k, v in image.items() if v is not None}
def test_no_images(self):
self.assertEqual(describe_base_emr_images(self.client('ec2')), [])
def test_base_emr_image(self):
image = self.make_image()
self.add_mock_ec2_image(image)
self.assertEqual(describe_base_emr_images(self.client('ec2')),
[image])
def test_most_recent_image_first(self):
image_old = self.make_image(ImageId='ami-old',
CreationDate='2010-06-06T00:00:00.000Z')
image_new = self.make_image(ImageId='ami-new',
CreationDate='2015-05-06T00:00:00.000Z')
self.add_mock_ec2_image(image_old)
self.add_mock_ec2_image(image_new)
self.assertEqual(describe_base_emr_images(self.client('ec2')),
[image_new, image_old])
def test_filter_and_sort(self):
image_old = self.make_image(ImageId='ami-old',
CreationDate='2010-06-06T00:00:00.000Z')
image_new = self.make_image(ImageId='ami-new',
CreationDate='2015-05-06T00:00:00.000Z')
image_null = {}
self.add_mock_ec2_image(image_null)
self.add_mock_ec2_image(image_old)
self.add_mock_ec2_image(image_null)
self.add_mock_ec2_image(image_new)
self.add_mock_ec2_image(image_null)
self.assertEqual(describe_base_emr_images(self.client('ec2')),
[image_new, image_old])
def assert_rejects_image(self, **kwargs):
image = self.make_image(**kwargs)
self.add_mock_ec2_image(image)
self.assertNotIn(image, describe_base_emr_images(self.client('ec2')))
def test_owner_must_be_amazon(self):
self.assert_rejects_image(ImageOwnerAlias='aws-marketplace',
OwnerId='679593333241')
def test_architecture_must_be_x86_64(self):
self.assert_rejects_image(Architecture='i386')
def test_root_device_type_must_be_ebs(self):
self.assert_rejects_image(RootDeviceType='instance-store')
def test_virtualization_type_must_be_hvm(self):
self.assert_rejects_image(VirtualizationType='paravirtual')
def test_amazon_linux_1_only(self):
self.assert_rejects_image(
Name='amzn2-ami-hvm-2017.12.0.20180109-x86_64-ebs')
def test_stable_amazon_linux_versions_only(self):
# no "testing" or "rc," only dots and numbers, please
self.assert_rejects_image(
Name='amzn-ami-hvm-2017.03.rc-1.20170327-x86_64-ebs')
def test_one_volume_only(self):
self.assert_rejects_image(
BlockDeviceMappings=[
self.BASE_EMR_IMAGE['BlockDeviceMappings'][0],
{
'DeviceName': 'xvdca',
'VirtualName': 'ephemeral0',
}
]
)
def test_dont_crash_on_missing_name(self):
# shouldn't happen in practice, but just in case
self.assert_rejects_image(Name=None)
def test_dont_crash_on_missing_block_device_mappings(self):
# shouldn't happen in practice, but just in case
self.assert_rejects_image(BlockDeviceMappings=None)
def test_dont_crash_on_missing_creation_date(self):
self.assert_rejects_image(CreationDate=None)
|
eos/core/__init__.py | YSaxon/eos | 168 | 12689935 | """
EOS core package.
"""
from .base import Base, EOSException
from .profiler import Profiler
from .symfony import Symfony
from .engine import Engine
from .cookies import RememberMe
from .eos import EOS
|
elastichq/api/clusters.py | billboggs/elasticsearch-HQ | 2,026 | 12689953 | <filename>elastichq/api/clusters.py
"""
.. module:: clusters
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from flask import request, current_app
from flask_restful import Resource
from requests.exceptions import ConnectionError
from elastichq.model import ClusterDTO
from . import api
from ..common.api_response import APIResponse
from ..common.exceptions import BadRequest, request_wrapper
from ..common.status_codes import HTTP_Status
from ..service import ClusterService, ConnectionNotAuthorized, ConnectionService
class ClusterConnection(Resource):
"""
Manages cluster connection pool.
"""
@request_wrapper
def post(self):
"""
Creates a connection to a given host/port. Accepts a JSON POST BODY. This will add the connection, if it doesn't already
exist, to the pool of connections and save the details in the database.
.. :quickref: ClusterConnection; Creates a connection to the cluster.
**Example request**:
.. sourcecode:: http
POST /api/clusters/_connect/ HTTP/1.1
Accept: application/json
.. code-block:: json
{
"ip": "127.0.0.1",
"port": "9200",
"use_ssl": false
}
**Request Structure**
- *(dict) --*
- **ip** *(string) --* IP address or host name
- **port** *(string) --* ES REST API port
- **use_ssl** *(boolean) --* Whether to use HTTPS or not.
**Example response**:
.. sourcecode:: http
HTTP/1.1 201
Content-Type: application/json
.. code-block:: json
{
"data": [
{
"cluster_name": "",
"cluster_ip": "",
"cluster_port": "9200",
"cluster_scheme": "http",
"cluster_connected": true,
"cluster_host": "http://10.0.0.0:9200",
"cluster_version": "2.3.5"
}
],
"status_code": 200,
"message": null,
"response_time": 92
}
**Response Structure**
- *(dict) --*
- **cluster_name** *(string) --* cluster name
- **cluster_ip** *(string) --* IP or host
- **cluster_port** *(string) --*
- **cluster_scheme** *(string) --*
- **cluster_connected** *(boolean) --* Whether there was a successful connection.
- **cluster_host** *(string) --* The complete connection url
- **cluster_version** *(string) --* Elasticsearch version
:reqheader Accept: application/json
:resheader Content-Type: application/json
:status 201: connection created
:status 400: bad request
:status 500: server error
"""
json_data = request.get_json(force=True)
params = request.values.to_dict()
params.update(json_data)
if params.get('ip', None) is None:
raise BadRequest(message='Missing required parameters.')
scheme = 'http'
if params.get('use_ssl', False) is True:
scheme = 'https'
try:
enable_ssl = current_app.config.get('ENABLE_SSL', False)
ca_certs = current_app.config.get('CA_CERTS', None)
verify_certs = current_app.config.get('VERIFY_CERTS', None)
client_key = current_app.config.get('CLIENT_KEY', None)
client_cert = current_app.config.get('CLIENT_CERT', None)
print(client_key)
print(client_cert)
response = ConnectionService().create_connection(ip=params['ip'], port=params.get('port', "9200"),
scheme=scheme, username=params.get('username', None),
password=params.get('password', None),
fail_on_exception=True,
enable_ssl=enable_ssl, ca_certs=ca_certs,
verify_certs=verify_certs,
client_key=client_key,
client_cert=client_cert)
schema = ClusterDTO(many=False)
result = schema.dump(response)
return APIResponse(result.data, HTTP_Status.CREATED, None)
except ConnectionNotAuthorized as cna:
return APIResponse([], HTTP_Status.UNAUTHORIZED, None)
except ConnectionError as ce:
return APIResponse([], HTTP_Status.NOT_FOUND, None)
@request_wrapper
def delete(self, cluster_name):
"""
Deletes a connection from the connection pool and the database, by cluster name.
:note: This method does NOT delete your Elasticsearch Cluster, just the connection from HQ to it.
**Example request**:
.. sourcecode:: http
DELETE /clusters/_connect/<CLUSTER_NAME> HTTP/1.1
Accept: application/json
:type cluster_name: string
:param cluster_name: Name of cluster connection to remove.
:returns: List of active clusters.
:status 200: Ok
:status 400: bad request
:status 500: server error
"""
response = ConnectionService().delete_connection(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
class ClusterList(Resource):
"""
Retrieves a list of all active and inactive cluster connections.
"""
@request_wrapper
def get(self):
"""Returns a collection of clusters.
**Example request**:
.. sourcecode:: http
GET /api/clusters/ HTTP/1.1
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
.. code-block:: json
{
"status_code": 200,
"response_time": 1648,
"message": null,
"data": [
{
"cluster_name": "",
"cluster_ip": "",
"cluster_port": "9200",
"cluster_scheme": "http",
"cluster_connected": true,
"cluster_host": "http://10.0.0.0:9200",
"cluster_version": "2.3.5",
"cluster_health": { }
}
]
}
**Response Structure**
- *(dict) --*
- **cluster_name** *(string) --* cluster name
- **cluster_ip** *(string) --* IP or host
- **cluster_port** *(string) --*
- **cluster_scheme** *(string) --*
- **cluster_connected** *(boolean) --* Whether there was a successful connection.
- **cluster_host** *(string) --* The complete connection url
- **cluster_version** *(string) --* Elasticsearch version
:resheader Content-Type: application/json
:status 200: OK
:status 500: server error
"""
response = ClusterService().get_clusters()
schema = ClusterDTO(many=True)
result = schema.dump(response)
return APIResponse(result.data, HTTP_Status.OK, None)
class ClusterHealth(Resource):
"""
Wrapper around the Cluster health API https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
"""
@request_wrapper
def get(self, cluster_name):
"""
Returns cluster health for one cluster
**Example request**:
.. sourcecode:: http
GET /api/clusters/<cluster_name>/_health HTTP/1.1
:type cluster_name: string
:param cluster_name: Name of cluster
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
.. code-block:: json
{
"status_code": 200,
"data": [
{
"active_primary_shards": 10,
"relocating_shards": 0,
"cluster_name": "es_v2",
"active_shards": 10,
"task_max_waiting_in_queue_millis": 0,
"number_of_pending_tasks": 0,
"timed_out": false,
"number_of_nodes": 1,
"unassigned_shards": 10,
"number_of_in_flight_fetch": 0,
"initializing_shards": 0,
"delayed_unassigned_shards": 0,
"active_shards_percent_as_number": 50,
"status": "yellow",
"number_of_data_nodes": 1
}
],
"response_time": 38,
"message": null
}
:resheader Content-Type: application/json
:status 200: OK
:status 500: server error
"""
response = ClusterService().get_cluster_health(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
class ClusterState(Resource):
@request_wrapper
def get(self, cluster_name):
"""
Wrapper around https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
"""
response = ClusterService().get_cluster_state(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
class ClusterSummary(Resource):
"""
Brief summary for a given cluster name
"""
@request_wrapper
def get(self, cluster_name):
"""
Given a cluster_name, returns summary information from several ES Cluster APIs.
:param cluster_name:
:return:
"""
response = ClusterService().get_cluster_summary(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
class ClusterStats(Resource):
@request_wrapper
def get(self, cluster_name):
"""
Wrapper around https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html
:param cluster_name:
:return:
"""
response = ClusterService().get_cluster_stats(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
class ClusterPendingTasks(Resource):
@request_wrapper
def get(self, cluster_name):
response = ClusterService().get_cluster_pending_tasks(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
class ClusterSettings(Resource):
@request_wrapper
def get(self, cluster_name):
response = ClusterService().get_cluster_settings(cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
@request_wrapper
def put(self, cluster_name):
json_data = request.get_json(force=True)
response = ClusterService().put_cluster_settings(json_data, cluster_name)
return APIResponse(response, HTTP_Status.OK, None)
api.add_resource(ClusterConnection, '/clusters/_connect', '/clusters/<string:cluster_name>/_connect',
endpoint='clusters', methods=['POST', 'DELETE'])
api.add_resource(ClusterList, '/clusters', endpoint='clusters_list', methods=['GET'])
api.add_resource(ClusterStats, '/clusters/<string:cluster_name>/_stats', endpoint='clusters_stats', methods=['GET'])
api.add_resource(ClusterHealth, '/clusters/<string:cluster_name>/_health', endpoint='clusters_health', methods=['GET'])
api.add_resource(ClusterSummary, '/clusters/<string:cluster_name>/_summary', endpoint='clusters_summary',
methods=['GET'])
api.add_resource(ClusterState, '/clusters/<string:cluster_name>/_state', endpoint='clusters_state', methods=['GET'])
api.add_resource(ClusterPendingTasks, '/clusters/<string:cluster_name>/_pending_tasks',
endpoint='clusters_pending_tasks', methods=['GET'])
api.add_resource(ClusterSettings, '/clusters/<string:cluster_name>/_settings', endpoint='clusters_settings',
methods=['GET', 'PUT'])
|
test/ios/TestIOSDriver.py | chonty/napalm1 | 1,752 | 12689971 | <filename>test/ios/TestIOSDriver.py
# Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for IOSDriver."""
import unittest
from napalm.ios import ios
from napalm.base.test.base import TestConfigNetworkDriver, TestGettersNetworkDriver
import re
class TestConfigIOSDriver(unittest.TestCase, TestConfigNetworkDriver):
"""Configuration Tests for IOSDriver.
Core file operations:
load_replace_candidate Tested
load_merge_candidate Tested
compare_config Tested
commit_config Tested
discard_config Tested
rollback Tested
Internal methods:
_enable_confirm Tested
_disable_confirm Tested
_gen_rollback_cfg Tested as part of rollback
_check_file_exists Tested
Misc methods:
open Tested
close Skipped
normalize_compare_config Tested (indirectly)
scp_file Tested
gen_full_path Tested
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
ip_addr = "127.0.0.1"
username = "vagrant"
password = "<PASSWORD>"
cls.vendor = "ios"
optional_args = {"port": 12204, "dest_file_system": "bootflash:"}
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
cls.device.open()
# Setup initial state
cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor)
cls.device.commit_config()
def test_ios_only_confirm(self):
"""Test _disable_confirm() and _enable_confirm().
_disable_confirm() changes router config so it doesn't prompt for confirmation
_enable_confirm() reenables this
"""
# Set initial device configuration
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
self.device.commit_config()
# Verify initial state
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
# Disable confirmation
self.device._disable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "file prompt quiet")
# Reenable confirmation
self.device._enable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
def test_ios_only_gen_full_path(self):
"""Test gen_full_path() method."""
output = self.device._gen_full_path(self.device.candidate_cfg)
self.assertEqual(output, self.device.dest_file_system + "/candidate_config.txt")
output = self.device._gen_full_path(self.device.rollback_cfg)
self.assertEqual(output, self.device.dest_file_system + "/rollback_config.txt")
output = self.device._gen_full_path(self.device.merge_cfg)
self.assertEqual(output, self.device.dest_file_system + "/merge_config.txt")
output = self.device._gen_full_path(
filename="running-config", file_system="system:"
)
self.assertEqual(output, "system:/running-config")
def test_ios_only_check_file_exists(self):
"""Test _check_file_exists() method."""
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
valid_file = self.device._check_file_exists(
self.device.dest_file_system + "/candidate_config.txt"
)
self.assertTrue(valid_file)
invalid_file = self.device._check_file_exists(
self.device.dest_file_system + "/bogus_999.txt"
)
self.assertFalse(invalid_file)
class TestGetterIOSDriver(unittest.TestCase, TestGettersNetworkDriver):
"""Getters Tests for IOSDriver.
Get operations:
get_lldp_neighbors
get_facts
get_interfaces
get_bgp_neighbors
get_interfaces_counters
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
cls.mock = True
username = "vagrant"
ip_addr = "192.168.0.234"
password = "<PASSWORD>"
cls.vendor = "ios"
optional_args = {}
optional_args["dest_file_system"] = "flash:"
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
if cls.mock:
cls.device.device = FakeIOSDevice()
else:
cls.device.open()
def test_ios_only_bgp_time_conversion(self):
"""Verify time conversion static method."""
test_cases = {
"1w0d": 604800,
"00:14:23": 863,
"00:13:40": 820,
"00:00:21": 21,
"00:00:13": 13,
"00:00:49": 49,
"1d11h": 126000,
"1d17h": 147600,
"8w5d": 5270400,
"1y28w": 48470400,
"never": -1,
}
for bgp_time, result in test_cases.items():
self.assertEqual(self.device.bgp_time_conversion(bgp_time), result)
class FakeIOSDevice:
"""Class to fake a IOS Device."""
@staticmethod
def read_txt_file(filename):
"""Read a txt file and return its content."""
with open(filename) as data_file:
return data_file.read()
def send_command_expect(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
cmd = re.sub(r"[\[\]\*\^\+\s\|]", "_", command)
output = self.read_txt_file("ios/mock_data/{}.txt".format(cmd))
return str(output)
def send_command(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
return self.send_command_expect(command)
if __name__ == "__main__":
unittest.main()
|
test/vso_tools/copyright_check.py | nbl97/nni | 2,305 | 12689973 | <reponame>nbl97/nni<gh_stars>1000+
import os
import sys
invalid_files = []
copyright_headers = [
'# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.',
'# Copyright (c) Microsoft Corporation. All rights reserved.\n#\n# MIT License',
]
whitelist = [
'nni/version.py',
'nni/algorithms/hpo/bohb_advisor/config_generator.py',
]
for root, dirs, files in os.walk('nni'):
for file in files:
if not file.endswith('.py'):
continue
full_path = os.path.join(root, file)
if full_path in whitelist:
continue
content = open(full_path).read()
if not content.strip():
# empty file
continue
if not any(content.startswith(header) for header in copyright_headers):
invalid_files.append(full_path)
if invalid_files:
print("The following files doesn't have a copyright text header.\n")
for file in invalid_files:
print(' ' + file)
    print('\nPlease add the following text at the beginning of each file.\n')
print('# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.')
sys.exit(1)
|
examples/testFile.py | tgolsson/appJar | 666 | 12689989 | import sys
sys.path.append("../")
from appJar import gui
def getQuestions(fileName = "questions.txt"):
questions = []
data = None
with open(fileName, "r") as questionsFile:
while True:
line = questionsFile.readline().strip()
if line == "EOF": break # end of file reached
elif line.startswith("-"): continue # ignore these lines
elif line.startswith("#"): # start of question
# we need to add our last question
if data is not None:
questions.append(data)
data = {"question":"", "options":[], "answer":""}
question = line[5:].strip()
nextLine = questionsFile.readline().strip()
if not nextLine.startswith("------"):
question += " " + nextLine
data["question"] = question
elif line.startswith("*"): # answer option
option = line[1:]
data["options"].append(option)
elif line.startswith("Answer:"): # answer
answer = line[8:]
data["answer"] = answer
return questions
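# Assumed layout of questions.txt (illustrative; inferred from the parser above,
# the real file is not shown here):
#
#   #Q01 What colour is a clear daytime sky?
#   ----------------------------------------
#   *Blue
#   *Green
#   *Red
#   Answer: Blue
#   EOF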
def checkAnswer(question):
selection = app.getRadioButton(question)
answer = questions[int(question[1:])-1]["answer"]
if selection == answer:
app.infoBox("CORRECT", "You got it!")
else:
app.infoBox("WRONG", "Try again!")
with gui("QUIZ") as app:
questions = getQuestions()
with app.pagedWindow("QUIZ DEMO"):
count = 0
for q in questions:
count += 1
title = "Q" + str(count)
with app.page():
app.setSticky("EW")
app.addLabel(title, title + ": " + q["question"])
app.setLabelBg(title, "green")
with app.labelFrame(title, hideTitle=True):
for o in q["options"]:
app.addRadioButton(title, o)
app.addNamedButton("CHECK", title, checkAnswer)
|
data/logs_model/test/test_combined_model.py | giuseppe/quay | 2,027 | 12690026 | <reponame>giuseppe/quay
from datetime import date, datetime, timedelta
from freezegun import freeze_time
from data.logs_model.inmemory_model import InMemoryModel
from data.logs_model.combined_model import CombinedLogsModel
from test.fixtures import *
@pytest.fixture()
def first_model():
return InMemoryModel()
@pytest.fixture()
def second_model():
return InMemoryModel()
@pytest.fixture()
def combined_model(first_model, second_model, initialized_db):
return CombinedLogsModel(first_model, second_model)
def test_log_action(first_model, second_model, combined_model, initialized_db):
day = date(2019, 1, 1)
# Write to the combined model.
with freeze_time(day):
combined_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
simple_repo = model.repository.get_repository("devtable", "simple")
# Make sure it is found in the first model but not the second.
assert combined_model.count_repository_actions(simple_repo, day) == 1
assert first_model.count_repository_actions(simple_repo, day) == 1
assert second_model.count_repository_actions(simple_repo, day) == 0
def test_count_repository_actions(first_model, second_model, combined_model, initialized_db):
today = date(2019, 1, 1)
# Write to the combined model.
with freeze_time(today):
# Write to each model.
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
second_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
second_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
# Ensure the counts match as expected.
simple_repo = model.repository.get_repository("devtable", "simple")
assert first_model.count_repository_actions(simple_repo, today) == 3
assert second_model.count_repository_actions(simple_repo, today) == 2
assert combined_model.count_repository_actions(simple_repo, today) == 5
def test_yield_logs_for_export(first_model, second_model, combined_model, initialized_db):
now = datetime.now()
with freeze_time(now):
# Write to each model.
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
second_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
second_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
later = now + timedelta(minutes=60)
# Ensure the full set of logs is yielded.
first_logs = list(first_model.yield_logs_for_export(now, later))[0]
second_logs = list(second_model.yield_logs_for_export(now, later))[0]
combined = list(combined_model.yield_logs_for_export(now, later))
full_combined = []
for subset in combined:
full_combined.extend(subset)
assert len(full_combined) == len(first_logs) + len(second_logs)
assert full_combined == (first_logs + second_logs)
def test_lookup_logs(first_model, second_model, combined_model, initialized_db):
now = datetime.now()
with freeze_time(now):
# Write to each model.
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
first_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
second_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
second_model.log_action(
"push_repo", namespace_name="devtable", repository_name="simple", ip="1.2.3.4"
)
later = now + timedelta(minutes=60)
def _collect_logs(model):
page_token = None
all_logs = []
while True:
paginated_logs = model.lookup_logs(now, later, page_token=page_token)
page_token = paginated_logs.next_page_token
all_logs.extend(paginated_logs.logs)
if page_token is None:
break
return all_logs
first_logs = _collect_logs(first_model)
second_logs = _collect_logs(second_model)
combined = _collect_logs(combined_model)
assert len(combined) == len(first_logs) + len(second_logs)
assert combined == (first_logs + second_logs)
|
tests/unit/test_ignore_list.py | georgettica/URLExtract | 184 | 12690041 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file contains pytests for the URL ignore list (exception URLs) handling of URLExtract
.. Licence MIT
.. codeauthor:: <NAME> <<EMAIL>>, <EMAIL>
"""
import pytest
@pytest.mark.parametrize(
"text, expected",
[
("example.com", []),
("ample.com", ["ample.com"]),
("another-url.com", []),
("http://example.com", []),
("http://example.com:1234", []),
("<EMAIL>", []),
("ftp://admin:[email protected]", []),
(
"http://subdom.example.com:1234/path/file.html?query=something#hashtag",
["http://subdom.example.com:1234/path/file.html?query=something#hashtag"],
),
("www.example.com", ["www.example.com"]),
("example.net", ["example.net"]),
],
)
def test_ignore_list(urlextract, text, expected):
"""
Testing filtering out URLs on ignore list
:param fixture urlextract: fixture holding URLExtract object
:param str text: text in which we should find links
:param list(str) expected: list of URLs that has to be found in text
"""
urlextract.ignore_list = {"example.com", "another-url.com"}
assert expected == urlextract.find_urls(text)
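# Hedged usage sketch (illustration only, not collected by pytest): a standalone
# version of the ignore-list behaviour asserted above. It assumes the public API of
# the urlextract package (URLExtract, its ignore_list attribute and find_urls),
# which is what the `urlextract` fixture wraps.
if __name__ == "__main__":
    from urlextract import URLExtract
    extractor = URLExtract()
    extractor.ignore_list = {"example.com", "another-url.com"}
    # Domains on the ignore list are dropped from the results.
    print(extractor.find_urls("ample.com links to example.com"))  # expected: ['ample.com']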
|
tests/test_paths.py | JPTIZ/asciimatics | 3,197 | 12690052 | import unittest
from asciimatics.event import MouseEvent
from asciimatics.paths import Path, DynamicPath
class TestPaths(unittest.TestCase):
def assert_path_equals(self, path, oracle):
path.reset()
positions = []
while not path.is_finished():
positions.append(path.next_pos())
self.assertEqual(positions, oracle)
def test_jump_and_wait(self):
"""
Check basic movement of cursor works.
"""
path = Path()
path.jump_to(10, 10)
path.wait(3)
self.assert_path_equals(path, [(10, 10), (10, 10), (10, 10), (10, 10)])
def test_straight_lines(self):
"""
Check a path works in straight lines.
"""
# Horizontal
path = Path()
path.jump_to(10, 10)
path.move_straight_to(15, 10, 5)
self.assert_path_equals(
path,
[(10, 10), (11, 10), (12, 10), (13, 10), (14, 10), (15, 10)])
# Vertical
path = Path()
path.jump_to(5, 5)
path.move_straight_to(5, 10, 5)
self.assert_path_equals(
path,
[(5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (5, 10)])
# Diagonal spaced
path = Path()
path.jump_to(5, 5)
path.move_straight_to(15, 15, 5)
self.assert_path_equals(
path,
[(5, 5), (7, 7), (9, 9), (11, 11), (13, 13), (15, 15)])
def test_spline(self):
"""
Check a path works with a spline curve.
"""
path = Path()
path.jump_to(0, 10)
path.move_round_to([(0, 10), (20, 0), (40, 10), (20, 20), (0, 10)], 20)
self.assert_path_equals(
path,
[(0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (5, 7),
(10, 4), (15, 1), (20, 0), (25, 1), (30, 3), (35, 7),
(40, 10), (35, 12), (30, 16), (25, 18), (20, 20), (15, 18),
(10, 15), (5, 12), (0, 10)])
def test_dynamic_path(self):
"""
Check a dynamic path works as expected.
"""
class TestPath(DynamicPath):
def process_event(self, event):
# Assume that we're always passing in a MouseEvent.
self._x = event.x
self._y = event.y
# Initial path should start at specified location.
path = TestPath(None, 0, 0)
self.assertEqual(path.next_pos(), (0, 0))
self.assertFalse(path.is_finished())
# Process event should move location.
path.process_event(MouseEvent(10, 5, 0))
self.assertEqual(path.next_pos(), (10, 5))
# Reset should return to original location.
path.reset()
self.assertEqual(path.next_pos(), (0, 0))
if __name__ == '__main__':
unittest.main()
|
tests/test_settings.py | PaKyong/labelImg | 17,641 | 12690062 |
#!/usr/bin/env python
import os
import sys
import time
import unittest
__author__ = 'TzuTaLin'
dir_name = os.path.abspath(os.path.dirname(__file__))
libs_path = os.path.join(dir_name, '..', 'libs')
sys.path.insert(0, libs_path)
from settings import Settings
class TestSettings(unittest.TestCase):
def test_basic(self):
settings = Settings()
settings['test0'] = 'hello'
settings['test1'] = 10
settings['test2'] = [0, 2, 3]
self.assertEqual(settings.get('test3', 3), 3)
self.assertEqual(settings.save(), True)
settings.load()
self.assertEqual(settings.get('test0'), 'hello')
self.assertEqual(settings.get('test1'), 10)
settings.reset()
if __name__ == '__main__':
unittest.main()
|
tests/nlu_core_tests/component_tests/pre_processing_tests/stopword_tests.py | milyiyo/nlu | 480 | 12690077 |
import unittest
from tests.test_utils import get_sample_pdf_with_labels, get_sample_pdf, get_sample_sdf, get_sample_pdf_with_extra_cols, get_sample_pdf_with_no_text_col ,get_sample_spark_dataframe
from nlu import *
class TestLem(unittest.TestCase):
def test_stopwords_pipe(self):
pipe = nlu.load('stopwords', verbose=True )
df = pipe.predict('HELLO WORLD! How are YOU!?!@', output_level='sentence',drop_irrelevant_cols=False, metadata=True, )
for c in df.columns: print(df[c])
if __name__ == '__main__':
unittest.main()
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/AMD/debug_output.py | ShujaKhalid/deep-rl | 210 | 12690116 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_AMD_debug_output'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_AMD_debug_output',error_checker=_errors._error_checker)
GL_DEBUG_CATEGORY_API_ERROR_AMD=_C('GL_DEBUG_CATEGORY_API_ERROR_AMD',0x9149)
GL_DEBUG_CATEGORY_APPLICATION_AMD=_C('GL_DEBUG_CATEGORY_APPLICATION_AMD',0x914F)
GL_DEBUG_CATEGORY_DEPRECATION_AMD=_C('GL_DEBUG_CATEGORY_DEPRECATION_AMD',0x914B)
GL_DEBUG_CATEGORY_OTHER_AMD=_C('GL_DEBUG_CATEGORY_OTHER_AMD',0x9150)
GL_DEBUG_CATEGORY_PERFORMANCE_AMD=_C('GL_DEBUG_CATEGORY_PERFORMANCE_AMD',0x914D)
GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD=_C('GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD',0x914E)
GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD=_C('GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD',0x914C)
GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD=_C('GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD',0x914A)
GL_DEBUG_LOGGED_MESSAGES_AMD=_C('GL_DEBUG_LOGGED_MESSAGES_AMD',0x9145)
GL_DEBUG_SEVERITY_HIGH_AMD=_C('GL_DEBUG_SEVERITY_HIGH_AMD',0x9146)
GL_DEBUG_SEVERITY_LOW_AMD=_C('GL_DEBUG_SEVERITY_LOW_AMD',0x9148)
GL_DEBUG_SEVERITY_MEDIUM_AMD=_C('GL_DEBUG_SEVERITY_MEDIUM_AMD',0x9147)
GL_MAX_DEBUG_LOGGED_MESSAGES_AMD=_C('GL_MAX_DEBUG_LOGGED_MESSAGES_AMD',0x9144)
GL_MAX_DEBUG_MESSAGE_LENGTH_AMD=_C('GL_MAX_DEBUG_MESSAGE_LENGTH_AMD',0x9143)
@_f
@_p.types(None,_cs.GLDEBUGPROCAMD,ctypes.c_void_p)
def glDebugMessageCallbackAMD(callback,userParam):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray,_cs.GLboolean)
def glDebugMessageEnableAMD(category,severity,count,ids,enabled):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLcharArray)
def glDebugMessageInsertAMD(category,severity,id,length,buf):pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,arrays.GLuintArray,arrays.GLuintArray,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetDebugMessageLogAMD(count,bufsize,categories,severities,ids,lengths,message):pass
|
nuplan/common/geometry/test/test_transform.py | motional/nuplan-devkit | 128 | 12690120 | import unittest
from unittest.mock import Mock, patch
import numpy as np
import numpy.typing as npt
from nuplan.common.actor_state.state_representation import Point2D, StateSE2
from nuplan.common.geometry.transform import (
rotate,
rotate_2d,
rotate_angle,
transform,
translate,
translate_laterally,
translate_longitudinally,
translate_longitudinally_and_laterally,
)
class TestTransform(unittest.TestCase):
"""Tests for transform functions"""
def test_rotate_2d(self) -> None:
"""Tests rotation of 2D point"""
# Setup
point = Point2D(1, 0)
rotation_matrix = np.array([[0, 1], [-1, 0]], dtype=np.float32) # type: npt.NDArray[np.float32]
# Function call
result = rotate_2d(point, rotation_matrix)
# Checks
self.assertEqual(result, Point2D(0, 1))
def test_translate(self) -> None:
"""Tests translate"""
# Setup
pose = StateSE2(3, 5, np.pi / 4)
translation = np.array([1, 2], dtype=np.float32) # type: npt.NDArray[np.float32]
# Function call
result = translate(pose, translation)
# Checks
self.assertEqual(result, StateSE2(4, 7, np.pi / 4))
def test_rotate(self) -> None:
"""Tests rotation of SE2 pose by rotation matrix"""
# Setup
pose = StateSE2(1, 2, np.pi / 4)
rotation_matrix = np.array([[0, 1], [-1, 0]], dtype=np.float32) # type: npt.NDArray[np.float32]
# Function call
result = rotate(pose, rotation_matrix)
# Checks
self.assertAlmostEqual(result.x, -2)
self.assertAlmostEqual(result.y, 1)
self.assertAlmostEqual(result.heading, -np.pi / 4)
def test_rotate_angle(self) -> None:
"""Tests rotation of SE2 pose by angle (in radian)"""
# Setup
pose = StateSE2(1, 2, np.pi / 4)
angle = -np.pi / 2
# Function call
result = rotate_angle(pose, angle)
# Checks
self.assertAlmostEqual(result.x, -2)
self.assertAlmostEqual(result.y, 1)
self.assertAlmostEqual(result.heading, -np.pi / 4)
def test_transform(self) -> None:
"""Tests transformation of SE2 pose"""
# Setup
pose = StateSE2(1, 2, 0)
transform_matrix = np.array(
[[-3, -2, 5], [0, -1, 4], [0, 0, 1]], dtype=np.float32
) # type: npt.NDArray[np.float32]
# Function call
result = transform(pose, transform_matrix)
# Checks
self.assertAlmostEqual(result.x, 2)
self.assertAlmostEqual(result.y, 0)
self.assertAlmostEqual(result.heading, np.pi, places=4)
@patch("nuplan.common.geometry.transform.translate")
def test_translate_longitudinally(self, mock_translate: Mock) -> None:
"""Tests longitudinal translation"""
# Setup
pose = StateSE2(1, 2, np.arctan(1 / 3))
# Function call
result = translate_longitudinally(pose, np.sqrt(10))
# Checks
np.testing.assert_array_almost_equal(mock_translate.call_args.args[1], np.array([3, 1]))
self.assertEqual(result, mock_translate.return_value)
@patch("nuplan.common.geometry.transform.translate")
def test_translate_laterally(self, mock_translate: Mock) -> None:
"""Tests lateral translation"""
# Setup
pose = StateSE2(1, 2, np.arctan(1 / 3))
# Function call
result = translate_laterally(pose, np.sqrt(10))
# Checks
np.testing.assert_array_almost_equal(mock_translate.call_args.args[1], np.array([-1, 3]))
self.assertEqual(result, mock_translate.return_value)
@patch("nuplan.common.geometry.transform.translate")
def test_translate_longitudinally_and_laterally(self, mock_translate: Mock) -> None:
"""Tests longitudinal and lateral translation"""
# Setup
pose = StateSE2(1, 2, np.arctan(1 / 3))
# Function call
result = translate_longitudinally_and_laterally(pose, np.sqrt(10), np.sqrt(10))
# Checks
np.testing.assert_array_almost_equal(mock_translate.call_args.args[1], np.array([2, 4]))
self.assertEqual(result, mock_translate.return_value)
if __name__ == "__main__":
unittest.main()
|
deepfence_backend/models/user.py | tuapuikia/ThreatMapper | 1,281 | 12690150 |
import datetime
import arrow
from sqlalchemy.sql import func
from flask import current_app as app
from werkzeug.security import generate_password_hash, check_password_hash
from config.extensions import db
from utils.constants import USER_ROLES, INVITE_EXPIRY
from utils.custom_exception import MultipleCompaniesFound
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(100), nullable=False)
last_name = db.Column(db.String(100), nullable=False)
email = db.Column(db.String(100), unique=True, nullable=False)
role_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=False)
role = db.relationship('Role', backref=db.backref('users', lazy=True))
company_id = db.Column(db.Integer, db.ForeignKey('company.id'), nullable=False)
company = db.relationship('Company', backref=db.backref('users', lazy=True))
notification = db.relationship('Notification', uselist=False, back_populates='user')
api_key = db.Column(db.String(100), unique=True, nullable=False)
phone_number = db.Column(db.String(100), nullable=True)
password_hash = db.Column(db.String(255), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), onupdate=func.now())
isActive = db.Column(db.Boolean, default=True, server_default='t', nullable=False)
def set_password(self, password):
password_hash = generate_password_hash(password)
self.password_hash = password_hash
def check_password(self, password):
return check_password_hash(self.password_hash, password)
@property
def full_name(self):
return self.first_name + " " + self.last_name
def get_identity(self):
"""
Use this to generate access token.
"""
user_identity = {
"id": self.id,
"first_name": self.first_name,
"last_name": self.last_name,
"email": self.email,
"company": self.company.name,
"role": self.role.name,
"api_key": self.api_key,
"isActive": self.isActive,
"company_license_exists": True,
}
return user_identity
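    # Hedged illustration (assumption, not taken from this repo): the identity dict
    # returned above is typically handed to a JWT helper to mint the token, e.g.
    # with flask_jwt_extended:
    #   access_token = create_access_token(identity=user.get_identity())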
def save(self, commit=True):
db.session.add(self)
# add a notification for the user.
notification = Notification(
user=self
)
db.session.add(notification)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def delete(self, commit=True):
db.session.delete(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def __repr__(self):
return '<User {}>'.format(self.email)
class Role(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True, nullable=False)
created_at = db.Column(db.DateTime(timezone=True), default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), onupdate=func.now())
@staticmethod
def get_all_roles():
return Role.query.all()
def save(self, commit=True):
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def __repr__(self):
return '<Role {}>'.format(self.name)
class Company(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False)
created_at = db.Column(db.DateTime(timezone=True), default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), onupdate=func.now())
def pre_save_checks(self):
# There should be only one company
count = Company.query.count()
if count == 0 and self.id is None:
return True
return False
def pre_save(self):
"""
Use this method to modify the data before storing in the database.
"""
self.name = self.name.lower()
def save(self, commit=True):
if not self.pre_save_checks():
companies = Company.query.all()
companies = [company.name for company in companies]
app.logger.error("Multiple companies found: [{}]".format(companies))
raise MultipleCompaniesFound()
self.pre_save()
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def delete(self, commit=True):
db.session.delete(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def get_admins(self):
"""
Get all admins of a company.
"""
admins = []
for user in self.users:
if user.isActive and user.role.name == USER_ROLES.ADMIN_USER:
admins.append(user)
return admins
def __repr__(self):
return '<Company {}>'.format(self.name)
class Invite(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), nullable=False)
code = db.Column(db.String(255), unique=True, nullable=False)
created_by = db.relationship('User', backref=db.backref('invites', lazy=True))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
company = db.relationship('Company', backref=db.backref('invites', lazy=True))
company_id = db.Column(db.Integer, db.ForeignKey('company.id'), nullable=False)
role = db.relationship('Role', backref=db.backref('invites', lazy=True))
role_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=False)
accepted = db.Column(db.Boolean, default=False)
created_at = db.Column(db.DateTime(timezone=True), default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), onupdate=func.now())
def save(self, commit=True):
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def delete(self, commit=True):
db.session.delete(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def is_expired(self):
if arrow.get(self.created_at).datetime \
+ datetime.timedelta(seconds=INVITE_EXPIRY) < arrow.now().datetime:
return True
return False
def __repr__(self):
return '<Invite {}>'.format(self.id)
class PasswordReset(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), nullable=False)
code = db.Column(db.String(255), unique=True, nullable=False)
expiry_timedelta = db.Column(db.Integer, nullable=False)
created_at = db.Column(db.DateTime(timezone=True), default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), onupdate=func.now())
def save(self, commit=True):
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def delete(self, commit=True):
db.session.delete(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def __repr__(self):
return '<PasswordReset {}>'.format(self.id)
class Notification(db.Model):
id = db.Column(db.Integer, primary_key=True)
last_seen = db.Column(db.DateTime(timezone=True), nullable=False, default=func.now())
# One to one mapping with user
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', back_populates='notification')
created_at = db.Column(db.DateTime(timezone=True), default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), onupdate=func.now())
def save(self, commit=True):
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def __repr__(self):
return '<Notification {}>'.format(self.id)
|
setup.py | aswinkp/swampdragon | 366 | 12690165 | import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="SwampDragon",
version="0.4.2.2",
author="<NAME>",
author_email="<EMAIL>",
description=("SwampDragon is a powerful platform making it easy to build real time web applications, combining the power of Django and Tornado"),
license="BSD",
keywords="SwampDragon, websockets, realtime, sockjs, django, tornado, framework",
url="http://swampdragon.net",
packages=find_packages(),
long_description=read('README.txt'),
include_package_data=True,
entry_points={'console_scripts': ['dragon-admin = swampdragon.core:run', ]},
install_requires=[
"Django>=1.6,<1.10",
"Tornado >= 3.2.2",
"sockjs-tornado >= 1.0.0",
"tornado-redis >= 2.4.18",
"redis >= 2.8",
"python-dateutil >= 2.2"
],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
)
|
cesium/tests/test_time_series.py | acrellin/cesium | 603 | 12690167 |
import os
from uuid import uuid4
import numpy.testing as npt
import numpy as np
from cesium import time_series
from cesium.time_series import TimeSeries
def sample_time_series(size=51, channels=1):
times = np.array([np.sort(np.random.random(size))
for i in range(channels)]).squeeze()
values = np.array([np.random.normal(size=size)
for i in range(channels)]).squeeze()
errors = np.array([np.random.exponential(size=size)
for i in range(channels)]).squeeze()
return times, values, errors
def test__compatible_shapes():
compat = time_series._compatible_shapes
assert compat(np.arange(5), np.arange(5))
assert not compat(np.arange(5), np.arange(6))
assert compat([np.arange(5)] * 5, [np.arange(5)] * 5)
assert not compat([np.arange(5)] * 5, [np.arange(5)] * 6)
assert not compat([np.arange(5)] * 5, [np.arange(6)] * 5)
assert not compat(np.arange(5), [np.arange(6)] * 5)
assert compat([[0, 1], [0, 1]], [[0, 1], [0, 1]])
assert not compat([[0, 1], [0, 1]], [[0], [0, 1]])
assert compat([0, 1], np.arange(2))
def assert_ts_equal(ts1, ts2):
for x1, x2 in zip((ts1.time, ts1.measurement, ts1.error),
(ts2.time, ts2.measurement, ts2.error)):
assert type(x1) == type(x2)
if isinstance(x1, np.ndarray):
assert np.array_equal(x1, x2)
else:
assert all(np.array_equal(x1_i, x2_i)
for x1_i, x2_i in zip(x1, x2))
assert ts1.label == ts2.label
assert ts1.meta_features == ts2.meta_features
assert ts1.name == ts2.name
def test_time_series_init_1d():
t, m, e = sample_time_series(channels=1)
ts = TimeSeries(t, m, e)
assert ts.time.shape == t.shape and np.allclose(ts.time, t)
assert ts.measurement.shape == m.shape and np.allclose(ts.measurement, m)
assert ts.error.shape == e.shape and np.allclose(ts.error, e)
assert ts.n_channels == 1
def test_time_series_init_2d():
n_channels = 3
t, m, e = sample_time_series(channels=n_channels)
ts = TimeSeries(t, m, e)
assert ts.time.shape == t.shape and np.allclose(ts.time, t)
assert ts.measurement.shape == m.shape and np.allclose(ts.measurement, m)
assert ts.error.shape == e.shape and np.allclose(ts.error, e)
assert ts.n_channels == n_channels
ts = TimeSeries(t[0], m, e[0])
assert ts.time.shape == m.shape and np.allclose(ts.time[0], t[0])
assert ts.measurement.shape == m.shape and np.allclose(ts.measurement, m)
assert ts.error.shape == m.shape and np.allclose(ts.error[0], e[0])
assert ts.n_channels == n_channels
def test_time_series_init_ragged():
n_channels = 3
t, m, e = sample_time_series(channels=n_channels)
t = [t[i][0:i+2] for i in range(len(t))]
m = [m[i][0:i+2] for i in range(len(m))]
e = [e[i][0:i+2] for i in range(len(e))]
ts = TimeSeries(t, m, e)
assert all(np.allclose(ts.time[i], t[i]) for i in range(len(t)))
assert all(np.allclose(ts.measurement[i], m[i]) for i in range(len(t)))
assert all(np.allclose(ts.error[i], e[i]) for i in range(len(t)))
assert ts.n_channels == n_channels
def test_time_series_default_values():
n_channels = 3
t, m, e = sample_time_series(channels=n_channels)
ts = TimeSeries(None, m[0], None)
npt.assert_allclose(ts.time,
np.linspace(0., time_series.DEFAULT_MAX_TIME,
m.shape[1]))
npt.assert_allclose(ts.error,
np.repeat(time_series.DEFAULT_ERROR_VALUE,
m.shape[1]))
assert ts.n_channels == 1
ts = TimeSeries(None, m, None)
npt.assert_allclose(ts.time[0],
np.linspace(0., time_series.DEFAULT_MAX_TIME,
m.shape[1]))
npt.assert_allclose(ts.error[0],
np.repeat(time_series.DEFAULT_ERROR_VALUE,
m.shape[1]))
assert ts.n_channels == n_channels
t = [t[i][0:i+2] for i in range(len(t))]
m = [m[i][0:i+2] for i in range(len(m))]
e = [e[i][0:i+2] for i in range(len(e))]
ts = TimeSeries(None, m, None)
for i in range(n_channels):
npt.assert_allclose(ts.time[i],
np.linspace(0., time_series.DEFAULT_MAX_TIME,
len(m[i])))
npt.assert_allclose(ts.error[i],
np.repeat(time_series.DEFAULT_ERROR_VALUE,
len(m[i])))
assert ts.n_channels == n_channels
def test_channels_iterator():
n_channels = 3
t, m, e = sample_time_series(channels=n_channels)
ts = TimeSeries(t[0], m[0], e[0])
for t_i, m_i, e_i in ts.channels():
npt.assert_allclose(t_i, t[0])
npt.assert_allclose(m_i, m[0])
npt.assert_allclose(e_i, e[0])
ts = TimeSeries(t, m, e)
for (t_i, m_i, e_i), i in zip(ts.channels(), range(n_channels)):
npt.assert_allclose(t_i, t[i])
npt.assert_allclose(m_i, m[i])
npt.assert_allclose(e_i, e[i])
t = [t[i][0:i+2] for i in range(len(t))]
m = [m[i][0:i+2] for i in range(len(m))]
e = [e[i][0:i+2] for i in range(len(e))]
ts = TimeSeries(t, m, e)
for (t_i, m_i, e_i), i in zip(ts.channels(), range(n_channels)):
npt.assert_allclose(t_i, t[i])
npt.assert_allclose(m_i, m[i])
npt.assert_allclose(e_i, e[i])
def test_time_series_npz(tmpdir):
n_channels = 3
t, m, e = sample_time_series(channels=n_channels)
ts = TimeSeries(t[0], m[0], e[0])
ts_path = os.path.join(str(tmpdir), str(uuid4()) + '.npz')
ts.save(ts_path)
ts_loaded = time_series.load(ts_path)
assert_ts_equal(ts, ts_loaded)
ts = TimeSeries(t[0], m, e[0])
ts_path = os.path.join(str(tmpdir), str(uuid4()) + '.npz')
ts.save(ts_path)
ts_loaded = time_series.load(ts_path)
assert_ts_equal(ts, ts_loaded)
t = [t[i][0:i+2] for i in range(len(t))]
m = [m[i][0:i+2] for i in range(len(m))]
e = [e[i][0:i+2] for i in range(len(e))]
ts = TimeSeries(t, m, e)
ts_path = os.path.join(str(tmpdir), str(uuid4()) + '.npz')
ts.save(ts_path)
ts_loaded = time_series.load(ts_path)
assert_ts_equal(ts, ts_loaded)
def test_time_series_sort():
t, m, e = sample_time_series(channels=1)
t[:2] = t[1::-1]
ts = TimeSeries(t, m, e)
npt.assert_allclose(ts.time, np.sort(t))
npt.assert_allclose(ts.measurement, m[np.argsort(t)])
npt.assert_allclose(ts.error, e[np.argsort(t)])
n_channels = 3
t, m, e = sample_time_series(channels=n_channels)
t[:, :2] = t[:, 1::-1]
ts = TimeSeries(t, m, e)
for i in range(len(m)):
npt.assert_allclose(ts.time[i], np.sort(t[i]))
npt.assert_allclose(ts.measurement[i], m[i][np.argsort(t[i])])
npt.assert_allclose(ts.error[i], e[i][np.argsort(t[i])])
ts = TimeSeries(t[0], m, e[0])
for i in range(len(m)):
npt.assert_allclose(ts.time[i], np.sort(t[0]))
npt.assert_allclose(ts.measurement[i], m[i][np.argsort(t[0])])
npt.assert_allclose(ts.error[i], e[0][np.argsort(t[0])])
|
test_bot/cogs/message_commands.py | Enegg/disnake | 290 | 12690200 |
import disnake
from disnake.ext import commands
class MessageCommands(commands.Cog):
def __init__(self, bot):
self.bot: commands.Bot = bot
@commands.message_command(name="Reverse")
async def reverse(self, inter: disnake.MessageCommandInteraction):
await inter.response.send_message(inter.target.content[::-1])
def setup(bot):
bot.add_cog(MessageCommands(bot))
print(f"> Extension {__name__} is ready\n")
|
rlgraph/components/neural_networks/actor_component.py | RLGraph/RLGraph | 290 | 12690189 | # Copyright 2018/2019 The RLgraph authors, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
from rlgraph.components.common.container_merger import ContainerMerger
from rlgraph.components.component import Component
from rlgraph.components.explorations.exploration import Exploration
from rlgraph.components.neural_networks.preprocessor_stack import PreprocessorStack
from rlgraph.components.policies.policy import Policy
from rlgraph.utils.decorators import rlgraph_api
class ActorComponent(Component):
"""
A Component that incorporates an entire pipeline from env state to an action choice.
Includes preprocessor, policy and exploration sub-components.
"""
def __init__(self, preprocessor_spec, policy_spec, exploration_spec=None, **kwargs):
"""
Args:
preprocessor_spec (Union[list,dict,PreprocessorSpec]):
                - A dict if the state from the Env will come in as a ContainerSpace (e.g. Dict). In this case,
                    each key in this dict specifies which value in the incoming dict should go through which PreprocessorStack.
- A list with layer specs.
- A PreprocessorStack object.
policy_spec (Union[dict,Policy]): A specification dict for a Policy object or a Policy object directly.
exploration_spec (Union[dict,Exploration]): A specification dict for an Exploration object or an Exploration
object directly.
"""
super(ActorComponent, self).__init__(scope=kwargs.pop("scope", "actor-component"), **kwargs)
self.preprocessor = PreprocessorStack.from_spec(preprocessor_spec)
self.policy = Policy.from_spec(policy_spec)
self.exploration = Exploration.from_spec(exploration_spec)
self.tuple_merger = ContainerMerger(is_tuple=True, merge_tuples_into_one=True)
self.add_components(self.policy, self.exploration, self.preprocessor, self.tuple_merger)
@rlgraph_api
def get_preprocessed_state_and_action(
self, states, other_nn_inputs=None, time_percentage=None, use_exploration=True
):
"""
API-method to get the preprocessed state and an action based on a raw state from an Env.
Args:
states (DataOp): The states coming directly from the environment.
other_nn_inputs (Optional[DataOpTuple]): Inputs to the NN that don't have to be pushed through the preprocessor.
time_percentage (SingleDataOp): The current consumed time (0.0 to 1.0) with respect to a max timestep
value.
use_exploration (Optional[DataOp]): Whether to use exploration or not.
Returns:
dict (3x DataOp):
`preprocessed_state` (DataOp): The preprocessed states.
`action` (DataOp): The chosen action.
#`last_internal_states` (DataOp): If RNN-based, the last internal states after passing through
#states. Or None.
"""
preprocessed_states = self.preprocessor.preprocess(states)
nn_inputs = preprocessed_states
if other_nn_inputs is not None:
# TODO: Do this automatically when using the `+` operator on DataOpRecords.
nn_inputs = self.tuple_merger.merge(nn_inputs, other_nn_inputs)
out = self.policy.get_action(nn_inputs)
actions = self.exploration.get_action(out["action"], time_percentage, use_exploration)
return dict(
preprocessed_state=preprocessed_states, action=actions, nn_outputs=out["nn_outputs"]
)
@rlgraph_api
def get_preprocessed_state_action_and_action_probs(
self, states, other_nn_inputs=None, time_percentage=None, use_exploration=True
):
"""
API-method to get the preprocessed state, one action and all possible action's probabilities based on a
raw state from an Env.
Args:
states (DataOp): The states coming directly from the environment.
other_nn_inputs (DataOp): Inputs to the NN that don't have to be pushed through the preprocessor.
time_percentage (SingleDataOp): The current consumed time (0.0 to 1.0) with respect to a max timestep
value.
use_exploration (Optional[DataOp]): Whether to use exploration or not.
Returns:
dict (4x DataOp):
`preprocessed_state` (DataOp): The preprocessed states.
`action` (DataOp): The chosen action.
`action_probs` (DataOp): The different action probabilities.
#`last_internal_states` (DataOp): If RNN-based, the last internal states after passing through
#states. Or None.
"""
preprocessed_states = self.preprocessor.preprocess(states)
nn_inputs = preprocessed_states
# merge preprocessed_states + other_nn_inputs
if other_nn_inputs is not None:
# TODO: Do this automatically when using the `+` operator on DataOpRecords.
nn_inputs = self.tuple_merger.merge(nn_inputs, other_nn_inputs)
# TODO: Dynamic Batching problem. State-value is not really needed, but dynamic batching will require us to
# TODO: run through the exact same partial-graph as the learner (which does need the extra state-value output).
# if isinstance(self.policy, SharedValueFunctionPolicy):
# out = self.policy.get_state_values_logits_probabilities_log_probs(preprocessed_states, internal_states)
# else:
# out = self.policy.get_logits_parameters_log_probs(preprocessed_states, internal_states)
# action_sample = self.policy.get_action_from_logits_and_parameters(out["logits"], out["parameters"])
out = self.policy.get_action_and_log_likelihood(nn_inputs)
actions = self.exploration.get_action(out["action"], time_percentage, use_exploration)
return dict(
preprocessed_state=preprocessed_states, action=actions, action_probs=out["action_probabilities"],
nn_outputs=out["nn_outputs"]
)
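# Hedged usage sketch (illustration only, not part of the RLgraph API): shows the
# call pattern and return keys of the API methods defined above. In practice the
# ActorComponent must first be built into a graph by an RLgraph graph executor;
# `actor_component` and `states` below are assumed to come from such a setup.
def _example_actor_component_usage(actor_component, states):
    out = actor_component.get_preprocessed_state_and_action(states, use_exploration=True)
    # The returned dict is keyed as documented in the method's docstring.
    return out["preprocessed_state"], out["action"], out["nn_outputs"]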
|
2020/03/09/How to Copy Django Model Instance Objects/django_clone_example/clone_object/example/views.py | kenjitagawa/youtube_video_code | 492 | 12690197 |
from django.shortcuts import render
from .models import Member
def index(request):
#member_one = Member(name='Anthony', location='Las Vegas')
#member_one.save()
member_three = Member.objects.get(pk=1)
#member_two = member_one
member_three.pk = None
member_three.id = None
member_three.name = 'Anthony'
member_three.location = 'Las Vegas'
member_three.save()
member_one = Member.objects.get(pk=1)
context = {
'member_one' : member_one,
'member_three' : member_three
}
return render(request, 'index.html', context)
|
tests/async/test_locators.py | microsoft/playwright-python | 6,243 | 12690200 | # Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from playwright._impl._path_utils import get_file_dirname
from playwright.async_api import Error, Page
from tests.server import Server
_dirname = get_file_dirname()
FILE_TO_UPLOAD = _dirname / ".." / "assets/file-to-upload.txt"
async def test_locators_click_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/button.html")
button = page.locator("button")
await button.click()
assert await page.evaluate("window['result']") == "Clicked"
async def test_locators_click_should_work_with_node_removed(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/button.html")
await page.evaluate("delete window['Node']")
button = page.locator("button")
await button.click()
assert await page.evaluate("window['result']") == "Clicked"
async def test_locators_click_should_work_for_text_nodes(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/button.html")
await page.evaluate(
"""() => {
window['double'] = false;
const button = document.querySelector('button');
button.addEventListener('dblclick', event => {
window['double'] = true;
});
}"""
)
button = page.locator("button")
await button.dblclick()
assert await page.evaluate("double") is True
assert await page.evaluate("result") == "Clicked"
async def test_locators_should_have_repr(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/button.html")
button = page.locator("button")
await button.click()
assert (
str(button)
== f"<Locator frame=<Frame name= url='{server.PREFIX}/input/button.html'> selector='button'>"
)
async def test_locators_get_attribute_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/dom.html")
button = page.locator("#outer")
assert await button.get_attribute("name") == "value"
assert await button.get_attribute("foo") is None
async def test_locators_input_value_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/dom.html")
await page.fill("#textarea", "input value")
text_area = page.locator("#textarea")
assert await text_area.input_value() == "input value"
async def test_locators_inner_html_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/dom.html")
locator = page.locator("#outer")
assert await locator.inner_html() == '<div id="inner">Text,\nmore text</div>'
async def test_locators_inner_text_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/dom.html")
locator = page.locator("#inner")
assert await locator.inner_text() == "Text, more text"
async def test_locators_text_content_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/dom.html")
locator = page.locator("#inner")
assert await locator.text_content() == "Text,\nmore text"
async def test_locators_is_hidden_and_is_visible_should_work(page: Page):
await page.set_content("<div>Hi</div><span></span>")
div = page.locator("div")
assert await div.is_visible() is True
assert await div.is_hidden() is False
span = page.locator("span")
assert await span.is_visible() is False
assert await span.is_hidden() is True
async def test_locators_is_enabled_and_is_disabled_should_work(page: Page):
await page.set_content(
"""
<button disabled>button1</button>
<button>button2</button>
<div>div</div>
"""
)
div = page.locator("div")
assert await div.is_enabled() is True
assert await div.is_disabled() is False
button1 = page.locator(':text("button1")')
assert await button1.is_enabled() is False
assert await button1.is_disabled() is True
button1 = page.locator(':text("button2")')
assert await button1.is_enabled() is True
assert await button1.is_disabled() is False
async def test_locators_is_editable_should_work(page: Page):
await page.set_content(
"""
<input id=input1 disabled><textarea></textarea><input id=input2>
"""
)
input1 = page.locator("#input1")
assert await input1.is_editable() is False
input2 = page.locator("#input2")
assert await input2.is_editable() is True
async def test_locators_is_checked_should_work(page: Page):
await page.set_content(
"""
<input type='checkbox' checked><div>Not a checkbox</div>
"""
)
element = page.locator("input")
assert await element.is_checked() is True
await element.evaluate("e => e.checked = false")
assert await element.is_checked() is False
async def test_locators_all_text_contents_should_work(page: Page):
await page.set_content(
"""
<div>A</div><div>B</div><div>C</div>
"""
)
element = page.locator("div")
assert await element.all_text_contents() == ["A", "B", "C"]
async def test_locators_all_inner_texts(page: Page):
await page.set_content(
"""
<div>A</div><div>B</div><div>C</div>
"""
)
element = page.locator("div")
assert await element.all_inner_texts() == ["A", "B", "C"]
async def test_locators_should_query_existing_element(page: Page, server: Server):
await page.goto(server.PREFIX + "/playground.html")
await page.set_content(
"""<html><body><div class="second"><div class="inner">A</div></div></body></html>"""
)
html = page.locator("html")
second = html.locator(".second")
inner = second.locator(".inner")
assert (
await page.evaluate("e => e.textContent", await inner.element_handle()) == "A"
)
async def test_locators_evaluate_handle_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/dom.html")
outer = page.locator("#outer")
inner = outer.locator("#inner")
check = inner.locator("#check")
text = await inner.evaluate_handle("e => e.firstChild")
await page.evaluate("1 + 1")
assert (
str(outer)
== f"<Locator frame=<Frame name= url='{server.PREFIX}/dom.html'> selector='#outer'>"
)
assert (
str(inner)
== f"<Locator frame=<Frame name= url='{server.PREFIX}/dom.html'> selector='#outer >> #inner'>"
)
assert str(text) == "JSHandle@#text=Text,↵more text"
assert (
str(check)
== f"<Locator frame=<Frame name= url='{server.PREFIX}/dom.html'> selector='#outer >> #inner >> #check'>"
)
async def test_locators_should_query_existing_elements(page: Page):
await page.set_content(
"""<html><body><div>A</div><br/><div>B</div></body></html>"""
)
html = page.locator("html")
elements = await html.locator("div").element_handles()
assert len(elements) == 2
result = []
for element in elements:
result.append(await page.evaluate("e => e.textContent", element))
assert result == ["A", "B"]
async def test_locators_return_empty_array_for_non_existing_elements(page: Page):
await page.set_content(
"""<html><body><div>A</div><br/><div>B</div></body></html>"""
)
html = page.locator("html")
elements = await html.locator("abc").element_handles()
assert len(elements) == 0
assert elements == []
async def test_locators_evaluate_all_should_work(page: Page):
await page.set_content(
"""<html><body><div class="tweet"><div class="like">100</div><div class="like">10</div></div></body></html>"""
)
tweet = page.locator(".tweet .like")
content = await tweet.evaluate_all("nodes => nodes.map(n => n.innerText)")
assert content == ["100", "10"]
async def test_locators_evaluate_all_should_work_with_missing_selector(page: Page):
await page.set_content(
"""<div class="a">not-a-child-div</div><div id="myId"></div"""
)
tweet = page.locator("#myId .a")
nodes_length = await tweet.evaluate_all("nodes => nodes.length")
assert nodes_length == 0
async def test_locators_hover_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/scrollable.html")
button = page.locator("#button-6")
await button.hover()
assert (
await page.evaluate("document.querySelector('button:hover').id") == "button-6"
)
async def test_locators_fill_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/textarea.html")
button = page.locator("input")
await button.fill("some value")
assert await page.evaluate("result") == "some value"
async def test_locators_check_should_work(page: Page):
await page.set_content("<input id='checkbox' type='checkbox'></input>")
button = page.locator("input")
await button.check()
assert await page.evaluate("checkbox.checked") is True
async def test_locators_uncheck_should_work(page: Page):
await page.set_content("<input id='checkbox' type='checkbox' checked></input>")
button = page.locator("input")
await button.uncheck()
assert await page.evaluate("checkbox.checked") is False
async def test_locators_select_option_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/select.html")
select = page.locator("select")
await select.select_option("blue")
assert await page.evaluate("result.onInput") == ["blue"]
assert await page.evaluate("result.onChange") == ["blue"]
async def test_locators_focus_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/button.html")
button = page.locator("button")
assert await button.evaluate("button => document.activeElement === button") is False
await button.focus()
assert await button.evaluate("button => document.activeElement === button") is True
async def test_locators_dispatch_event_should_work(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/button.html")
button = page.locator("button")
await button.dispatch_event("click")
assert await page.evaluate("result") == "Clicked"
async def test_locators_should_upload_a_file(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/fileupload.html")
input = page.locator("input[type=file]")
file_path = os.path.relpath(FILE_TO_UPLOAD, os.getcwd())
await input.set_input_files(file_path)
assert (
await page.evaluate("e => e.files[0].name", await input.element_handle())
== "file-to-upload.txt"
)
async def test_locators_should_press(page: Page):
await page.set_content("<input type='text' />")
await page.locator("input").press("h")
    assert await page.eval_on_selector("input", "input => input.value") == "h"
async def test_locators_should_scroll_into_view(page: Page, server: Server):
await page.goto(server.PREFIX + "/offscreenbuttons.html")
for i in range(11):
button = page.locator(f"#btn{i}")
before = await button.evaluate(
"button => button.getBoundingClientRect().right - window.innerWidth"
)
assert before == 10 * i
await button.scroll_into_view_if_needed()
after = await button.evaluate(
"button => button.getBoundingClientRect().right - window.innerWidth"
)
assert after <= 0
await page.evaluate("window.scrollTo(0, 0)")
async def test_locators_should_select_textarea(
page: Page, server: Server, browser_name: str
):
await page.goto(server.PREFIX + "/input/textarea.html")
textarea = page.locator("textarea")
await textarea.evaluate("textarea => textarea.value = 'some value'")
await textarea.select_text()
if browser_name == "firefox":
assert await textarea.evaluate("el => el.selectionStart") == 0
assert await textarea.evaluate("el => el.selectionEnd") == 10
else:
assert await page.evaluate("window.getSelection().toString()") == "some value"
async def test_locators_should_type(page: Page):
await page.set_content("<input type='text' />")
await page.locator("input").type("hello")
    assert await page.eval_on_selector("input", "input => input.value") == "hello"
async def test_locators_should_screenshot(
page: Page, server: Server, assert_to_be_golden
):
await page.set_viewport_size(
{
"width": 500,
"height": 500,
}
)
await page.goto(server.PREFIX + "/grid.html")
await page.evaluate("window.scrollBy(50, 100)")
element = page.locator(".box:nth-of-type(3)")
assert_to_be_golden(
await element.screenshot(), "screenshot-element-bounding-box.png"
)
async def test_locators_should_return_bounding_box(page: Page, server: Server):
await page.set_viewport_size(
{
"width": 500,
"height": 500,
}
)
await page.goto(server.PREFIX + "/grid.html")
element = page.locator(".box:nth-of-type(13)")
box = await element.bounding_box()
assert box == {
"x": 100,
"y": 50,
"width": 50,
"height": 50,
}
async def test_locators_should_respect_first_and_last(page: Page):
await page.set_content(
"""
<section>
<div><p>A</p></div>
<div><p>A</p><p>A</p></div>
<div><p>A</p><p>A</p><p>A</p></div>
</section>"""
)
assert await page.locator("div >> p").count() == 6
assert await page.locator("div").locator("p").count() == 6
assert await page.locator("div").first.locator("p").count() == 1
assert await page.locator("div").last.locator("p").count() == 3
async def test_locators_should_respect_nth(page: Page):
await page.set_content(
"""
<section>
<div><p>A</p></div>
<div><p>A</p><p>A</p></div>
<div><p>A</p><p>A</p><p>A</p></div>
</section>"""
)
assert await page.locator("div >> p").nth(0).count() == 1
assert await page.locator("div").nth(1).locator("p").count() == 2
assert await page.locator("div").nth(2).locator("p").count() == 3
async def test_locators_should_throw_on_capture_without_nth(page: Page):
await page.set_content(
"""
<section><div><p>A</p></div></section>
"""
)
with pytest.raises(Error, match="Can't query n-th element"):
await page.locator("*css=div >> p").nth(1).click()
async def test_locators_should_throw_due_to_strictness(page: Page):
await page.set_content(
"""
<div>A</div><div>B</div>
"""
)
with pytest.raises(Error, match="strict mode violation"):
await page.locator("div").is_visible()
async def test_locators_should_throw_due_to_strictness_2(page: Page):
await page.set_content(
"""
<select><option>One</option><option>Two</option></select>
"""
)
with pytest.raises(Error, match="strict mode violation"):
await page.locator("option").evaluate("e => {}")
async def test_locators_set_checked(page: Page):
    await page.set_content("<input id='checkbox' type='checkbox'></input>")
locator = page.locator("input")
await locator.set_checked(True)
assert await page.evaluate("checkbox.checked")
await locator.set_checked(False)
assert await page.evaluate("checkbox.checked") is False
async def test_locators_wait_for(page: Page) -> None:
await page.set_content("<div></div>")
locator = page.locator("div")
task = locator.wait_for()
await page.eval_on_selector("div", "div => div.innerHTML = '<span>target</span>'")
await task
assert await locator.text_content() == "target"
|
test/test_naarad_api.py | richardhsu/naarad | 180 | 12690206 | # coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ConfigParser
import os
import sys
import time
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')))
from naarad import Naarad
import naarad.naarad_constants as CONSTANTS
naarad_obj = None
def setup_module():
global naarad_obj
naarad_obj = Naarad()
def test_naarad_apis():
"""
:return: None
"""
examples_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'examples')
config_file = os.path.join(os.path.join(examples_directory, 'conf'), 'config-gc')
config_object = ConfigParser.ConfigParser()
config_object.optionxform = str
config_object.read(config_file)
input_directory = os.path.join(examples_directory, 'logs')
output_directory = 'test_api_temp'
diff_output_directory = 'test_api_temp/diff_location'
report1_location = 'test_api_temp/0'
report2_location = 'test_api_temp/1'
global naarad_obj
test_id_1 = naarad_obj.signal_start(config_file)
time.sleep(60)
naarad_obj.signal_stop(test_id_1)
test_id_2 = naarad_obj.signal_start(config_object)
time.sleep(60)
naarad_obj.signal_stop(test_id_2)
if naarad_obj.analyze(input_directory, output_directory) != CONSTANTS.OK:
print naarad_obj.get_failed_analyses()
naarad_obj.get_sla_data(test_id_1)
naarad_obj.get_stats_data(test_id_1)
naarad_obj.get_sla_data(test_id_2)
naarad_obj.get_stats_data(test_id_2)
if naarad_obj.diff(test_id_1, test_id_2, None) != CONSTANTS.OK:
print 'Error encountered during diff'
if naarad_obj.diff_reports_by_location(report1_location, report2_location, diff_output_directory, None):
print 'Error encountered during diff'
print 'Please inspect the generated reports manually'
|
api/allennlp_demo/bidaf_elmo/test_api.py | dragon18456/allennlp-demo | 190 | 12690228 | from overrides import overrides
import pytest
from allennlp_demo.bidaf_elmo.api import BidafElmoModelEndpoint
from allennlp_demo.common.testing import RcModelEndpointTestCase
class TestBidafElmoModelEndpoint(RcModelEndpointTestCase):
endpoint = BidafElmoModelEndpoint()
@pytest.mark.skip("Takes too long")
@overrides
def test_interpret(self):
pass
@pytest.mark.skip("Takes too long")
@overrides
def test_attack(self):
pass
|
crabageprediction/venv/Lib/site-packages/fontTools/otlLib/__init__.py | 13rianlucero/CrabAgePrediction | 38,667 | 12690244 | """OpenType Layout-related functionality."""
|
src/unittest/python/install_utils_tests.py | klr8/pybuilder | 1,419 | 12690246 |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from os.path import normcase as nc, join as jp
from pybuilder.core import (Project,
Logger,
Dependency,
RequirementsFile)
from pybuilder.install_utils import install_dependencies
from pybuilder.pip_utils import PIP_MODULE_STANZA
from pybuilder.plugins.python.install_dependencies_plugin import initialize_install_dependencies_plugin
from test_utils import Mock, ANY, patch
__author__ = "<NAME>"
class InstallDependencyTest(unittest.TestCase):
def setUp(self):
self.project = Project("unittest", ".")
self.project.set_property("dir_install_logs", "any_directory")
self.project.set_property("dir_target", "/any_target_directory")
self.logger = Mock(Logger)
self.pyb_env = Mock()
self.pyb_env.executable = ["exec"]
self.pyb_env.site_paths = []
self.pyb_env.env_dir = "a"
self.pyb_env.execute_command.return_value = 0
initialize_install_dependencies_plugin(self.project)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_requirements_file_dependency(self, *_):
dependency = RequirementsFile("requirements.txt")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "-r", "requirements.txt"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_without_version(self, *_):
dependency = Dependency("spam")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch",
constraints_file_name="constraint_file")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "-c", nc(jp(self.pyb_env.env_dir, "constraint_file")), "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_without_version_on_windows_derivate(self, *_):
dependency = Dependency("spam")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA + ["install", "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_insecurely_when_property_is_set(self, *_):
dependency = Dependency("spam")
self.project.set_property("install_dependencies_insecure_installation", ["spam"])
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "--allow-unverified", "spam", "--allow-external", "spam", "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_securely_when_property_is_not_set_to_dependency(self, *_):
dependency = Dependency("spam")
self.project.set_property("install_dependencies_insecure_installation", ["some-other-dependency"])
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch",
constraints_file_name="constraint_file")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "-c", ANY, "--allow-unverified", "some-other-dependency",
"--allow-external", "some-other-dependency", "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
# some-other-dependency might be a dependency of "spam"
# so we always have to put the insecure dependencies in the command line :-(
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_using_custom_index_url(self, *_):
self.project.set_property("install_dependencies_index_url", "some_index_url")
dependency = Dependency("spam")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "--index-url", "some_index_url", "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_use_extra_index_url_when_index_url_is_not_set(self, *_):
self.project.set_property("install_dependencies_extra_index_url", "some_extra_index_url")
dependency = Dependency("spam")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "--extra-index-url", "some_extra_index_url", "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_use_index_and_extra_index_url_when_index_and_extra_index_url_are_set(self, *_):
self.project.set_property("install_dependencies_index_url", "some_index_url")
self.project.set_property("install_dependencies_extra_index_url", "some_extra_index_url")
dependency = Dependency("spam")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "--index-url", "some_index_url", "--extra-index-url", "some_extra_index_url", "spam"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_with_version(self, *_):
dependency = Dependency("spam", "0.1.2")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "spam>=0.1.2"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_with_version_and_operator(self, *_):
dependency = Dependency("spam", "==0.1.2")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA + ["install", "spam==0.1.2"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
def test_should_install_dependency_with_wrong_version_and_operator(self):
self.assertRaises(ValueError, Dependency, "spam", "~=1")
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_with_url(self, *_):
dependency = Dependency("spam", url="some_url")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "--force-reinstall", "some_url"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
@patch("pybuilder.install_utils.tail_log")
@patch("pybuilder.install_utils.open")
@patch("pybuilder.install_utils.create_constraint_file")
@patch("pybuilder.install_utils.get_packages_info", return_value={})
def test_should_install_dependency_with_url_even_if_version_is_given(self, *_):
dependency = Dependency("spam", version="0.1.2", url="some_url")
install_dependencies(self.logger, self.project, dependency, self.pyb_env, "install_batch")
self.pyb_env.execute_command.assert_called_with(
self.pyb_env.executable + PIP_MODULE_STANZA +
["install", "--force-reinstall", "some_url"],
cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY, shell=False, no_path_search=True)
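# Illustrative sketch (not part of the original test module): every assertion
# above expects install_dependencies() to hand the virtualenv a pip command
# line of the shape `<executable> -m pip install [options] <package spec>`.
# The helper below is a hypothetical stand-in that only mirrors that shape;
# it is not a PyBuilder API and the option ordering is an assumption.
def _example_pip_install_args(executable, package_spec, index_url=None,
                              extra_index_url=None, constraint_file=None):
    args = list(executable) + ["-m", "pip", "install"]
    if constraint_file:
        args += ["-c", constraint_file]
    if index_url:
        args += ["--index-url", index_url]
    if extra_index_url:
        args += ["--extra-index-url", extra_index_url]
    args.append(package_spec)
    return args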
|
tests/brevitas/test_brevitas_validate_mobilenet.py | mmrahorovic/finn | 109 | 12690247 | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import brevitas.onnx as bo
import csv
import numpy as np
import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import finn.core.onnx_exec as oxe
import finn.transformation.streamline.absorb as absorb
import finn.util.imagenet as imagenet_util
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.general import (
GiveReadableTensorNames,
GiveUniqueNodeNames,
GiveUniqueParameterTensors,
RemoveStaticGraphInputs,
)
from finn.transformation.infer_data_layouts import InferDataLayouts
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.insert_topk import InsertTopK
from finn.transformation.merge_onnx_models import MergeONNXModels
from finn.util.basic import make_build_dir
from finn.util.pytorch import NormalizePreProc
from finn.util.test import get_test_model_trained
# normalization (preprocessing) settings for MobileNet-v1 w4a4
mean = [0.485, 0.456, 0.406]
std = 0.226
ch = 3
def test_brevitas_mobilenet_preproc():
if "IMAGENET_VAL_PATH" not in os.environ.keys():
pytest.skip("Can't do validation without IMAGENET_VAL_PATH")
n_images = 1000
# Brevitas-style: use torchvision pipeline
std_arr = [std, std, std]
normalize = transforms.Normalize(mean=mean, std=std_arr)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
os.environ["IMAGENET_VAL_PATH"] + "/../",
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=1,
shuffle=False,
num_workers=0,
)
# FINN-style: load_resize_crop then normalization as PyTorch graph
preproc = NormalizePreProc(mean, std, ch)
finn_loader = imagenet_util.get_val_images(n_images)
val_loader = iter(val_loader)
for i in range(n_images):
(img_path, finn_target) = next(finn_loader)
finn_img = imagenet_util.load_resize_crop(img_path)
finn_img = preproc.forward(torch.from_numpy(finn_img).float())
(pyt_img, pyt_target) = next(val_loader)
assert finn_img.shape == pyt_img.shape
assert (finn_img == pyt_img).all()
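# Illustrative sketch (not part of the original test): the Brevitas-style
# pipeline above ends with torchvision's Normalize, i.e. a channel-wise
# (x - mean) / std with the scalar `std` broadcast to all three channels.
# The helper below is a plain-numpy stand-in for that final step only; any
# extra scaling done inside NormalizePreProc or load_resize_crop is not
# shown here.
def _example_channelwise_normalize(img_nchw):
    mean_arr = np.asarray(mean).reshape(1, ch, 1, 1)
    return (img_nchw - mean_arr) / std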
@pytest.mark.slow
# marked as XFAIL until Brevitas export issues are resolved:
# https://github.com/Xilinx/brevitas/issues/173
@pytest.mark.xfail
def test_brevitas_compare_exported_mobilenet():
if "IMAGENET_VAL_PATH" not in os.environ.keys():
pytest.skip("Can't do validation without IMAGENET_VAL_PATH")
n_images = 10
debug_mode = False
export_onnx_path = make_build_dir("test_brevitas_mobilenet-v1_")
# export preprocessing
preproc_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_preproc.onnx"
preproc = NormalizePreProc(mean, std, ch)
bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx)
preproc_model = ModelWrapper(preproc_onnx)
preproc_model = preproc_model.transform(InferShapes())
preproc_model = preproc_model.transform(GiveUniqueNodeNames())
preproc_model = preproc_model.transform(GiveUniqueParameterTensors())
preproc_model = preproc_model.transform(GiveReadableTensorNames())
# export the actual MobileNet-v1
finn_onnx = export_onnx_path + "/quant_mobilenet_v1_4b.onnx"
mobilenet = get_test_model_trained("mobilenet", 4, 4)
if debug_mode:
dbg_hook = bo.enable_debug(mobilenet)
bo.export_finn_onnx(mobilenet, (1, 3, 224, 224), finn_onnx)
model = ModelWrapper(finn_onnx)
model = model.transform(InferShapes())
model = model.transform(FoldConstants())
model = model.transform(RemoveStaticGraphInputs())
model = model.transform(InsertTopK())
# get initializer from Mul that will be absorbed into topk
a0 = model.get_initializer(model.get_nodes_by_op_type("Mul")[-1].input[1])
model = model.transform(absorb.AbsorbScalarMulAddIntoTopK())
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
model = model.transform(InferDataLayouts())
model = model.transform(GiveUniqueNodeNames())
model = model.transform(GiveUniqueParameterTensors())
model = model.transform(GiveReadableTensorNames())
model.save(export_onnx_path + "/quant_mobilenet_v1_4b_wo_preproc.onnx")
# create merged preprocessing + MobileNet-v1 model
model = model.transform(MergeONNXModels(preproc_model))
model.save(export_onnx_path + "/quant_mobilenet_v1_4b.onnx")
with open(
export_onnx_path + "/mobilenet_validation.csv", "w", newline=""
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(
[
"goldenID",
"brevitasTop5",
"brevitasTop5[%]",
"finnTop5",
"finnTop5[%]",
"top5equal",
"top5%equal",
]
)
csvfile.flush()
workload = imagenet_util.get_val_images(n_images, interleave_classes=True)
all_inds_ok = True
all_probs_ok = True
for (img_path, target_id) in workload:
img_np = imagenet_util.load_resize_crop(img_path)
img_torch = torch.from_numpy(img_np).float()
# do forward pass in PyTorch/Brevitas
input_tensor = preproc.forward(img_torch)
expected = mobilenet.forward(input_tensor).detach().numpy()
expected_topk = expected.flatten()
expected_top5 = np.argsort(expected_topk)[-5:]
expected_top5 = np.flip(expected_top5)
expected_top5_prob = []
for index in expected_top5:
expected_top5_prob.append(expected_topk[index])
idict = {model.graph.input[0].name: img_np}
odict = oxe.execute_onnx(model, idict, return_full_exec_context=True)
produced = odict[model.graph.output[0].name]
produced_prob = odict["TopK_0_out0"] * a0
inds_ok = (produced.flatten() == expected_top5).all()
probs_ok = np.isclose(produced_prob.flatten(), expected_top5_prob).all()
all_inds_ok = all_inds_ok and inds_ok
all_probs_ok = all_probs_ok and probs_ok
writer.writerow(
[
str(target_id),
str(expected_top5),
str(expected_top5_prob),
str(produced.flatten()),
str(produced_prob.flatten()),
str(inds_ok),
str(probs_ok),
]
)
csvfile.flush()
if ((not inds_ok) or (not probs_ok)) and debug_mode:
print("Results differ for %s" % img_path)
# check all tensors at debug markers
names_brevitas = set(dbg_hook.values.keys())
names_finn = set(odict.keys())
names_common = names_brevitas.intersection(names_finn)
for dbg_name in names_common:
if not np.isclose(
dbg_hook.values[dbg_name].detach().numpy(),
odict[dbg_name],
atol=1e-3,
).all():
print("Tensor %s differs between Brevitas and FINN" % dbg_name)
assert all_inds_ok and all_probs_ok
|
using-python-to-interact-with-the-operating-system/week-five/charfreq.py | veera-raju/google-it-automation-with-python | 164 | 12690250 | #!/usr/bin/env python3
def char_frequency(filename):
"""
Counts the frequency of each character in the given file.
"""
# First try to open the file
try:
f = open(filename)
# code in the except block is only executed if one of the instructions in the try block raises an error of the matching type
except OSError:
return None
# Now process the file
characters = {}
for line in f:
for char in line:
characters[char] = characters.get(char, 0) + 1
f.close()
return characters
|
fast_transformers/feature_maps/__init__.py | SamuelCahyawijaya/fast-transformers | 1,171 | 12690251 |
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Implementations of feature maps to be used with linear attention and causal
linear attention."""
from .base import elu_feature_map, ActivationFunctionFeatureMap
from .fourier_features import RandomFourierFeatures, Favor, \
SmoothedRandomFourierFeatures, GeneralizedRandomFeatures
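# Illustrative sketch (not part of the original module): a feature map phi()
# lets softmax attention be replaced by linear attention, e.g. phi(x) =
# elu(x) + 1 for the elu feature map exported above. The function below is a
# self-contained torch example of that substitution; it is not the
# fast_transformers attention implementation itself.
def _example_linear_attention(Q, K, V, eps=1e-6):
    import torch
    Qf = torch.nn.functional.elu(Q) + 1          # (N, L, D)
    Kf = torch.nn.functional.elu(K) + 1          # (N, S, D)
    KV = torch.einsum("nsd,nsm->ndm", Kf, V)     # sum_s phi(k_s) v_s^T
    Z = 1.0 / (torch.einsum("nld,nd->nl", Qf, Kf.sum(dim=1)) + eps)
    return torch.einsum("nld,ndm,nl->nlm", Qf, KV, Z)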
|
lenstronomy/GalKin/aperture_types.py | heather999/lenstronomy | 107 | 12690255 |
__author__ = 'sibirrer'
import numpy as np
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
class Slit(object):
"""
Slit aperture description
"""
def __init__(self, length, width, center_ra=0, center_dec=0, angle=0):
"""
:param length: length of slit
:param width: width of slit
:param center_ra: center of slit
:param center_dec: center of slit
:param angle: orientation angle of the slit; angle=0 corresponds to the slit length being aligned with the RA direction
"""
self._length = length
self._width = width
self._center_ra, self._center_dec = center_ra, center_dec
self._angle = angle
def aperture_select(self, ra, dec):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:return: bool, True if photon/ray is within the slit, False otherwise
"""
return slit_select(ra, dec, self._length, self._width, self._center_ra, self._center_dec, self._angle), 0
@property
def num_segments(self):
"""
number of segments with separate measurements of the velocity dispersion
:return: int
"""
return 1
@export
def slit_select(ra, dec, length, width, center_ra=0, center_dec=0, angle=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param length: length of slit
:param width: width of slit
:param center_ra: center of slit
:param center_dec: center of slit
:param angle: orientation angle of the slit; angle=0 corresponds to the slit length being aligned with the RA direction
:return: bool, True if photon/ray is within the slit, False otherwise
"""
ra_ = ra - center_ra
dec_ = dec - center_dec
x = np.cos(angle) * ra_ + np.sin(angle) * dec_
y = - np.sin(angle) * ra_ + np.cos(angle) * dec_
if abs(x) < length / 2. and abs(y) < width / 2.:
return True
else:
return False
@export
class Frame(object):
"""
rectangular box with a hole in the middle (also rectangular), effectively a frame
"""
def __init__(self, width_outer, width_inner, center_ra=0, center_dec=0, angle=0):
"""
:param width_outer: width of box to the outer parts
:param width_inner: width of inner removed box
:param center_ra: center of the frame
:param center_dec: center of the frame
:param angle: orientation angle of the frame; angle=0 corresponds to the frame edges being aligned with the RA/DEC directions
"""
self._width_outer = width_outer
self._width_inner = width_inner
self._center_ra, self._center_dec = center_ra, center_dec
self._angle = angle
def aperture_select(self, ra, dec):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:return: bool, True if photon/ray is within the frame, False otherwise
"""
return frame_select(ra, dec, self._width_outer, self._width_inner, self._center_ra, self._center_dec, self._angle), 0
@property
def num_segments(self):
"""
number of segments with separate measurements of the velocity dispersion
:return: int
"""
return 1
@export
def frame_select(ra, dec, width_outer, width_inner, center_ra=0, center_dec=0, angle=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param width_outer: width of box to the outer parts
:param width_inner: width of inner removed box
:param center_ra: center of the frame
:param center_dec: center of the frame
:param angle: orientation angle of the frame; angle=0 corresponds to the frame edges being aligned with the RA/DEC directions
:return: bool, True if photon/ray is within the box with a hole, False otherwise
"""
ra_ = ra - center_ra
dec_ = dec - center_dec
x = np.cos(angle) * ra_ + np.sin(angle) * dec_
y = - np.sin(angle) * ra_ + np.cos(angle) * dec_
if abs(x) < width_outer / 2. and abs(y) < width_outer / 2.:
if abs(x) < width_inner / 2. and abs(y) < width_inner / 2.:
return False
else:
return True
return False
@export
class Shell(object):
"""
Shell aperture
"""
def __init__(self, r_in, r_out, center_ra=0, center_dec=0):
"""
:param r_in: innermost radius to be selected
:param r_out: outermost radius to be selected
:param center_ra: center of the sphere
:param center_dec: center of the sphere
"""
self._r_in, self._r_out = r_in, r_out
self._center_ra, self._center_dec = center_ra, center_dec
def aperture_select(self, ra, dec):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:return: bool, True if photon/ray is within the shell, False otherwise
"""
return shell_select(ra, dec, self._r_in, self._r_out, self._center_ra, self._center_dec), 0
@property
def num_segments(self):
"""
number of segments with separate measurements of the velocity dispersion
:return: int
"""
return 1
@export
def shell_select(ra, dec, r_in, r_out, center_ra=0, center_dec=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param r_in: innermost radius to be selected
:param r_out: outermost radius to be selected
:param center_ra: center of the sphere
:param center_dec: center of the sphere
:return: boolean, True if within the radial range, False otherwise
"""
x = ra - center_ra
y = dec - center_dec
R = np.sqrt(x ** 2 + y ** 2)
if (R >= r_in) and (R < r_out):
return True
else:
return False
@export
class IFUShells(object):
"""
class for an Integral Field Unit spectrograph with azimuthal shells where the kinematics are measured
"""
def __init__(self, r_bins, center_ra=0, center_dec=0):
"""
:param r_bins: array of radial bins to average the dispersion spectra in ascending order.
It starts at the innermost edge and ends at the outermost edge.
:param center_ra: center of the sphere
:param center_dec: center of the sphere
"""
self._r_bins = r_bins
self._center_ra, self._center_dec = center_ra, center_dec
def aperture_select(self, ra, dec):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:return: bool, True if photon/ray is within one of the IFU shells, False otherwise; and the index of the shell
"""
return shell_ifu_select(ra, dec, self._r_bins, self._center_ra, self._center_dec)
@property
def num_segments(self):
"""
number of segments with separate measurements of the velocity dispersion
:return: int
"""
return len(self._r_bins) - 1
@export
def shell_ifu_select(ra, dec, r_bin, center_ra=0, center_dec=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param r_bin: array of radial bins to average the dispersion spectra in ascending order.
It starts at the innermost edge and ends at the outermost edge.
:param center_ra: center of the sphere
:param center_dec: center of the sphere
:return: (boolean, index) tuple; True plus the bin index if within the radial range, (False, None) otherwise
"""
x = ra - center_ra
y = dec - center_dec
R = np.sqrt(x ** 2 + y ** 2)
for i in range(0, len(r_bin) - 1):
if (R >= r_bin[i]) and (R < r_bin[i+1]):
return True, i
return False, None
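# Illustrative sketch (not part of the original module): minimal usage of the
# selector functions above. The coordinates, aperture sizes and bin edges are
# arbitrary example values, not lenstronomy defaults.
if __name__ == "__main__":
    # a point 0.3" along a slit of length 1" and width 0.2", rotated by 90 deg
    print(slit_select(ra=0., dec=0.3, length=1., width=0.2, angle=np.pi / 2))  # True
    # IFU-style shell selection: R ~ 0.85 falls into the second bin (index 1)
    print(shell_ifu_select(ra=0.6, dec=0.6, r_bin=np.array([0., 0.5, 1., 1.5])))  # (True, 1)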
|
apprise/plugins/NotifySendGrid.py | linkmauve/apprise | 4,764 | 12690263 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# You will need an API Key for this plugin to work.
# From the Settings -> API Keys you can click "Create API Key" if you don't
# have one already. The key must have at least the "Mail Send" permission
# to work.
#
# The schema to use the plugin looks like this:
# {schema}://{apikey}:{from_email}
#
# Your {from_email} must be comprised of your SendGrid Authenticated
# Domain. The same domain must have 'Link Branding' turned on as well or it
# will not work. This can be seen from Settings -> Sender Authentication.
# If your (SendGrid) verified domain is example.com, then your schema may
# look something like this:
#   {schema}://{apikey}:{user}@example.com
# Simple API Reference:
# - https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html
# - https://sendgrid.com/docs/ui/sending-email/\
# how-to-send-an-email-with-dynamic-transactional-templates/
import requests
from json import dumps
from .NotifyBase import NotifyBase
from ..common import NotifyFormat
from ..common import NotifyType
from ..utils import parse_list
from ..utils import is_email
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
# Extend HTTP Error Messages
SENDGRID_HTTP_ERROR_MAP = {
401: 'Unauthorized - You do not have authorization to make the request.',
413: 'Payload To Large - The JSON payload you have included in your '
'request is too large.',
429: 'Too Many Requests - The number of requests you have made exceeds '
'SendGrid’s rate limitations.',
}
class NotifySendGrid(NotifyBase):
"""
A wrapper for Notify SendGrid Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'SendGrid'
# The services URL
service_url = 'https://sendgrid.com'
# The default secure protocol
secure_protocol = 'sendgrid'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_sendgrid'
# Default to HTML
notify_format = NotifyFormat.HTML
# The default Email API URL to use
notify_url = 'https://api.sendgrid.com/v3/mail/send'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.2
# The default subject to use if one isn't specified.
default_empty_subject = '<no subject>'
# Define object templates
templates = (
'{schema}://{apikey}:{from_email}',
'{schema}://{apikey}:{from_email}/{targets}',
)
# Define our template arguments
template_tokens = dict(NotifyBase.template_tokens, **{
'apikey': {
'name': _('API Key'),
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[A-Z0-9._-]+$', 'i'),
},
'from_email': {
'name': _('Source Email'),
'type': 'string',
'required': True,
},
'target_email': {
'name': _('Target Email'),
'type': 'string',
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'to': {
'alias_of': 'targets',
},
'cc': {
'name': _('Carbon Copy'),
'type': 'list:string',
},
'bcc': {
'name': _('Blind Carbon Copy'),
'type': 'list:string',
},
'template': {
# Template ID
# The template ID is 64 characters with one dash (d-uuid)
'name': _('Template'),
'type': 'string',
},
})
# Support Template Dynamic Variables (Substitutions)
template_kwargs = {
'template_data': {
'name': _('Template Data'),
'prefix': '+',
},
}
def __init__(self, apikey, from_email, targets=None, cc=None,
bcc=None, template=None, template_data=None, **kwargs):
"""
Initialize Notify SendGrid Object
"""
super(NotifySendGrid, self).__init__(**kwargs)
# API Key (associated with project)
self.apikey = validate_regex(
apikey, *self.template_tokens['apikey']['regex'])
if not self.apikey:
msg = 'An invalid SendGrid API Key ' \
'({}) was specified.'.format(apikey)
self.logger.warning(msg)
raise TypeError(msg)
result = is_email(from_email)
if not result:
msg = 'Invalid ~From~ email specified: {}'.format(from_email)
self.logger.warning(msg)
raise TypeError(msg)
# Store email address
self.from_email = result['full_email']
# Acquire Targets (To Emails)
self.targets = list()
# Acquire Carbon Copies
self.cc = set()
# Acquire Blind Carbon Copies
self.bcc = set()
# Now our dynamic template (if defined)
self.template = template
# Now our dynamic template data (if defined)
self.template_data = template_data \
if isinstance(template_data, dict) else {}
# Validate recipients (to:) and drop bad ones:
for recipient in parse_list(targets):
result = is_email(recipient)
if result:
self.targets.append(result['full_email'])
continue
self.logger.warning(
'Dropped invalid email '
'({}) specified.'.format(recipient),
)
# Validate recipients (cc:) and drop bad ones:
for recipient in parse_list(cc):
result = is_email(recipient)
if result:
self.cc.add(result['full_email'])
continue
self.logger.warning(
'Dropped invalid Carbon Copy email '
'({}) specified.'.format(recipient),
)
# Validate recipients (bcc:) and drop bad ones:
for recipient in parse_list(bcc):
result = is_email(recipient)
if result:
self.bcc.add(result['full_email'])
continue
self.logger.warning(
'Dropped invalid Blind Carbon Copy email '
'({}) specified.'.format(recipient),
)
if len(self.targets) == 0:
# Notify ourselves
self.targets.append(self.from_email)
return
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Our URL parameters
params = self.url_parameters(privacy=privacy, *args, **kwargs)
if len(self.cc) > 0:
# Handle our Carbon Copy Addresses
params['cc'] = ','.join(self.cc)
if len(self.bcc) > 0:
# Handle our Blind Carbon Copy Addresses
params['bcc'] = ','.join(self.bcc)
if self.template:
# Handle our Template ID if it was specified
params['template'] = self.template
# Append our template_data into our parameter list
params.update(
{'+{}'.format(k): v for k, v in self.template_data.items()})
# a simple boolean check as to whether we display our target emails
# or not
has_targets = \
not (len(self.targets) == 1 and self.targets[0] == self.from_email)
return '{schema}://{apikey}:{from_email}/{targets}?{params}'.format(
schema=self.secure_protocol,
apikey=self.pprint(self.apikey, privacy, safe=''),
# never encode email since it plays a huge role in our hostname
from_email=self.from_email,
targets='' if not has_targets else '/'.join(
[NotifySendGrid.quote(x, safe='') for x in self.targets]),
params=NotifySendGrid.urlencode(params),
)
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform SendGrid Notification
"""
headers = {
'User-Agent': self.app_id,
'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(self.apikey),
}
# error tracking (used for function return)
has_error = False
# A Simple Email Payload Template
_payload = {
'personalizations': [{
# Placeholder
'to': [{'email': None}],
}],
'from': {
'email': self.from_email,
},
# A subject is a requirement, so if none is specified we must
# set a default with at least 1 character or SendGrid will deny
# our request
'subject': title if title else self.default_empty_subject,
'content': [{
'type': 'text/plain'
if self.notify_format == NotifyFormat.TEXT else 'text/html',
'value': body,
}],
}
if self.template:
_payload['template_id'] = self.template
if self.template_data:
_payload['personalizations'][0]['dynamic_template_data'] = \
{k: v for k, v in self.template_data.items()}
targets = list(self.targets)
while len(targets) > 0:
target = targets.pop(0)
# Create a copy of our template
payload = _payload.copy()
# the cc, bcc and to fields must be unique or SendGrid will reject the
# request; the code below ensures the target isn't in the cc or bcc
# list, and that the cc list does not contain any of the bcc entries
cc = (self.cc - self.bcc - set([target]))
bcc = (self.bcc - set([target]))
# Set our target
payload['personalizations'][0]['to'][0]['email'] = target
if len(cc):
payload['personalizations'][0]['cc'] = \
[{'email': email} for email in cc]
if len(bcc):
payload['personalizations'][0]['bcc'] = \
[{'email': email} for email in bcc]
self.logger.debug('SendGrid POST URL: %s (cert_verify=%r)' % (
self.notify_url, self.verify_certificate,
))
self.logger.debug('SendGrid Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
self.notify_url,
data=dumps(payload),
headers=headers,
verify=self.verify_certificate,
timeout=self.request_timeout,
)
if r.status_code not in (
requests.codes.ok, requests.codes.accepted):
# We had a problem
status_str = \
NotifySendGrid.http_response_code_lookup(
r.status_code, SENDGRID_HTTP_ERROR_MAP)
self.logger.warning(
'Failed to send SendGrid notification to {}: '
'{}{}error={}.'.format(
target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
self.logger.info(
'Sent SendGrid notification to {}.'.format(target))
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending SendGrid '
'notification to {}.'.format(target))
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to re-instantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Our URL looks like this:
# {schema}://{apikey}:{from_email}/{targets}
#
# which actually equates to:
# {schema}://{user}:{password}@{host}/{email1}/{email2}/etc..
# ^ ^ ^
# | | |
# apikey -from addr-
if not results.get('user'):
# An API Key was not properly specified
return None
if not results.get('password'):
# A From Email was not correctly specified
return None
# Prepare our API Key
results['apikey'] = NotifySendGrid.unquote(results['user'])
# Prepare our From Email Address
results['from_email'] = '{}@{}'.format(
NotifySendGrid.unquote(results['password']),
NotifySendGrid.unquote(results['host']),
)
# Acquire our targets
results['targets'] = NotifySendGrid.split_path(results['fullpath'])
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifySendGrid.parse_list(results['qsd']['to'])
# Handle Carbon Copy Addresses
if 'cc' in results['qsd'] and len(results['qsd']['cc']):
results['cc'] = \
NotifySendGrid.parse_list(results['qsd']['cc'])
# Handle Blind Carbon Copy Addresses
if 'bcc' in results['qsd'] and len(results['qsd']['bcc']):
results['bcc'] = \
NotifySendGrid.parse_list(results['qsd']['bcc'])
# Handle a Template ID if one was specified
if 'template' in results['qsd'] and len(results['qsd']['template']):
results['template'] = \
NotifySendGrid.unquote(results['qsd']['template'])
# Add any template substitutions
results['template_data'] = results['qsd+']
return results
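# Illustrative sketch (not part of the original plugin): sending a notification
# through the high-level Apprise object with a sendgrid:// URL of the form
# parsed above. The API key, domain and recipient are placeholders.
def _example_sendgrid_usage():
    import apprise
    apobj = apprise.Apprise()
    apobj.add('sendgrid://myapikey:noreply@example.com/to@example.com?cc=cc@example.com')
    return apobj.notify(body='World', title='Hello')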
|